// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/cpu.h>
#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include "../drivers/thermal/intel/thermal_interrupt.h"

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
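
/*
 * A note on the fixed-point helpers above: values carry FRAC_BITS = 8
 * fractional bits, so int_tofp(2) == 512 and
 * mul_fp(int_tofp(2), int_tofp(3)) == int_tofp(6).  The "ext" variants
 * carry EXT_FRAC_BITS = 14 fractional bits for extra precision in
 * intermediate results such as the average performance ratio.
 */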

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is the physical max P state for a processor.
 *			This can be higher than the max_pstate, which can
 *			be limited by platform thermal design power limits
 * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
 * @scaling:		Scaling factor between performance and frequency
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @min_freq:		@min_pstate frequency in cpufreq units
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	perf_ctl_scaling;
	int	scaling;
	int	turbo_pstate;
	unsigned int min_freq;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target P
 * state, the voltage data needs to be specified to select the next P state.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	bool turbo_disabled_mf;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO wait time difference between the last and
 *			current sample
 * @sample:		Storage for the last sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 * @hwp_notify_work:	workqueue for HWP notifications.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;

	u64	last_update;
	u64	last_sample_time;
	u64	aperf_mperf_shift;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	int32_t	min_perf_ratio;
	int32_t	max_perf_ratio;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_cached;
	u64 hwp_req_cached;
	u64 hwp_cap_cached;
	u64 last_io_update;
	unsigned int sched_flags;
	u32 hwp_boost_min;
	bool suspended;
	struct delayed_work hwp_notify_work;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_cpu_scaling:	Get frequency scaling factor for a given cpu
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P state limits.  This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(int cpu);
	int (*get_max_physical)(int cpu);
	int (*get_min)(int cpu);
	int (*get_turbo)(int cpu);
	int (*get_scaling)(void);
	int (*get_cpu_scaling)(int cpu);
	int (*get_aperf_mperf_shift)(void);
	u64 (*get_val)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
static bool hwp_forced __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#define HYBRID_SCALING_FACTOR		78741
#define HYBRID_SCALING_FACTOR_MTL	80000

static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
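
/*
 * With the default core scaling of 100000, each P-state step is worth
 * 100 MHz.  On hybrid parts a P-core HWP performance level is worth
 * roughly 78.741 MHz instead (about 100000 / 1.27), and Meteor Lake
 * uses 80 MHz per level.
 */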

static inline int core_get_scaling(void)
{
	return 100000;
}

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (intel_pstate_acpi_pm_profile_server())
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);

#define CPPC_MAX_PERF	U8_MAX

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * On some systems with overclocking enabled, CPPC.highest_perf is
	 * hardcoded to 0xff, so it cannot be used to enable ITMT.  In that
	 * case, look at MSR_HWP_CAPABILITIES bits [8:0] instead.
	 */
	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}
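
/*
 * Note: in the function above, ITMT support is only enabled once two CPUs
 * with different highest_perf values have been observed, i.e. once the
 * package is known to be asymmetric; after that, max_highest_perf and
 * min_highest_perf stop being updated.
 */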

static int intel_pstate_get_cppc_guaranteed(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return ret;

	if (cppc_perf.guaranteed_perf)
		return cppc_perf.guaranteed_perf;

	return cppc_perf.nominal_perf;
}

static int intel_pstate_cppc_get_scaling(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);

	/*
	 * If the nominal frequency and the nominal performance are not
	 * zero and the ratio between them is not 100, return the hybrid
	 * scaling factor.
	 */
	if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
	    cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
		return hybrid_scaling_factor;

	return core_get_scaling();
}

#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
				ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore it and continue
	 * as usual without taking it into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
	return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
	return -ENOTSUPP;
}

static int intel_pstate_cppc_get_scaling(int cpu)
{
	return core_get_scaling();
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
					unsigned int relation)
{
	if (freq == cpu->pstate.turbo_freq)
		return cpu->pstate.turbo_pstate;

	if (freq == cpu->pstate.max_freq)
		return cpu->pstate.max_pstate;

	switch (relation) {
	case CPUFREQ_RELATION_H:
		return freq / cpu->pstate.scaling;
	case CPUFREQ_RELATION_C:
		return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
	}

	return DIV_ROUND_UP(freq, cpu->pstate.scaling);
}
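
/*
 * For example, with a scaling factor of 100000, a 2050000 kHz request
 * maps to P-state 20 under CPUFREQ_RELATION_H (round down) and 21 under
 * the default CPUFREQ_RELATION_L (round up); CPUFREQ_RELATION_C picks
 * the closest level (21 here, as DIV_ROUND_CLOSEST rounds halves up).
 */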

static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
{
	return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
}

/**
 * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
 * @cpu: Target CPU.
 *
 * On hybrid processors, HWP may expose more performance levels than there are
 * P-states accessible through the PERF_CTL interface.  If that happens, the
 * scaling factor between HWP performance levels and CPU frequency will be less
 * than the scaling factor between P-state values and CPU frequency.
 *
 * In that case, adjust the CPU parameters used in computations accordingly.
 */
static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
	int scaling = cpu->pstate.scaling;
	int freq;

	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);

	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
					   perf_ctl_scaling);
	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
					 perf_ctl_scaling);

	freq = perf_ctl_max_phys * perf_ctl_scaling;
	cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);

	freq = cpu->pstate.min_pstate * perf_ctl_scaling;
	cpu->pstate.min_freq = freq;
	/*
	 * Convert the min P-state value retrieved via pstate_funcs.get_min()
	 * to the effective range of HWP performance levels.
	 */
	cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}

static inline void update_turbo_state(void)
{
	u64 misc_en;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
}

static int min_perf_pct_min(void)
{
	struct cpudata *cpu = all_cpu_data[0];
	int turbo_pstate = cpu->pstate.turbo_pstate;

	return turbo_pstate ?
		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * If hwp_req_data is 0, the caller did not pass a cached
		 * MSR_HWP_REQUEST value, so read the MSR here to get the EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */

enum energy_perf_value_index {
	EPP_INDEX_DEFAULT = 0,
	EPP_INDEX_PERFORMANCE,
	EPP_INDEX_BALANCE_PERFORMANCE,
	EPP_INDEX_BALANCE_POWERSAVE,
	EPP_INDEX_POWERSAVE,
};

static const char * const energy_perf_strings[] = {
	[EPP_INDEX_DEFAULT] = "default",
	[EPP_INDEX_PERFORMANCE] = "performance",
	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
	[EPP_INDEX_POWERSAVE] = "power",
	NULL
};
static unsigned int epp_values[] = {
	[EPP_INDEX_DEFAULT] = 0, /* Unused index */
	[EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
	[EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
	[EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
	[EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
};
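
/*
 * The raw EPP scale runs from 0 (maximum performance) to 255 (maximum
 * energy savings); the named preferences above map to fixed points on
 * that scale.
 */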

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
	s16 epp;
	int index = -EINVAL;

	*raw_epp = 0;
	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (epp == epp_values[EPP_INDEX_PERFORMANCE])
			return EPP_INDEX_PERFORMANCE;
		if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
			return EPP_INDEX_BALANCE_PERFORMANCE;
		if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
			return EPP_INDEX_BALANCE_POWERSAVE;
		if (epp == epp_values[EPP_INDEX_POWERSAVE])
			return EPP_INDEX_POWERSAVE;
		*raw_epp = epp;
		return 0;
	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4-bit value, but the ranges above restrict
		 * which values can be set; effectively only the top two
		 * bits are used.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}
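
/*
 * For example, an EPB value of 6 (in the balance-performance range)
 * yields (6 >> 2) + 1 == 2, i.e. EPP_INDEX_BALANCE_PERFORMANCE.
 */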

static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
	int ret;

	/*
	 * Use the cached HWP Request MSR value, because in the active mode the
	 * register itself may be updated by intel_pstate_hwp_boost_up() or
	 * intel_pstate_hwp_boost_down() at any time.
	 */
	u64 value = READ_ONCE(cpu->hwp_req_cached);

	value &= ~GENMASK_ULL(31, 24);
	value |= (u64)epp << 24;
	/*
	 * The only other updater of hwp_req_cached in the active mode,
	 * intel_pstate_hwp_set(), is called under the same lock as this
	 * function, so it cannot run in parallel with the update below.
	 */
	WRITE_ONCE(cpu->hwp_req_cached, value);
	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
	if (!ret)
		cpu->epp_cached = epp;

	return ret;
}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index, bool use_raw,
					      u32 raw_epp)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (use_raw)
			epp = raw_epp;
		else if (epp == -EINVAL)
			epp = epp_values[pref_index];

		/*
		 * To avoid confusion, refuse to set EPP to any values different
		 * from 0 (performance) if the current policy is "performance",
		 * because those values would be overridden.
		 */
		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
			return -EBUSY;

		ret = intel_pstate_set_epp(cpu_data, epp);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	char str_preference[21];
	bool raw = false;
	ssize_t ret;
	u32 epp = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0) {
		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
			return ret;

		ret = kstrtouint(buf, 10, &epp);
		if (ret)
			return ret;

		if (epp > 255)
			return -EINVAL;

		raw = true;
	}

	/*
	 * This function runs with the policy R/W semaphore held, which
	 * guarantees that the driver pointer will not change while it is
	 * running.
	 */
	if (!intel_pstate_driver)
		return -EAGAIN;

	mutex_lock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate) {
		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
	} else {
		/*
		 * In the passive mode the governor needs to be stopped on the
		 * target CPU before the EPP update and restarted after it,
		 * which is super-heavy-weight, so make sure it is worth doing
		 * upfront.
		 */
		if (!raw)
			epp = ret ? epp_values[ret] : cpu->epp_default;

		if (cpu->epp_cached != epp) {
			int err;

			cpufreq_stop_governor(policy);
			ret = intel_pstate_set_epp(cpu, epp);
			err = cpufreq_start_governor(policy);
			if (!ret)
				ret = err;
		} else {
			ret = 0;
		}
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return ret ?: count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference, raw_epp;

	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
	if (preference < 0)
		return preference;

	if (raw_epp)
		return sprintf(buf, "%d\n", raw_epp);
	else
		return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int ratio, freq;

	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
	if (ratio <= 0) {
		u64 cap;

		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
		ratio = HWP_GUARANTEED_PERF(cap);
	}

	freq = ratio * cpu->pstate.scaling;
	if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
		freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);

	return sprintf(buf, "%d\n", freq);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	&base_frequency,
	NULL,
};

static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	u64 cap;

	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
	WRITE_ONCE(cpu->hwp_cap_cached, cap);
	cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
	cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	int scaling = cpu->pstate.scaling;

	__intel_pstate_get_hwp_cap(cpu);

	cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
	if (scaling != cpu->pstate.perf_ctl_scaling) {
		int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;

		cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
						 perf_ctl_scaling);
		cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
						   perf_ctl_scaling);
	}
}

static void intel_pstate_hwp_set(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];
	int max, min;
	u64 value;
	s16 epp;

	max = cpu_data->max_perf_ratio;
	min = cpu_data->min_perf_ratio;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
		min = max;

	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	if (cpu_data->epp_policy == cpu_data->policy)
		goto skip_epp;

	cpu_data->epp_policy = cpu_data->policy;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
		epp = intel_pstate_get_epp(cpu_data, value);
		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write. */
		if (epp < 0)
			goto skip_epp;

		epp = 0;
	} else {
		/* Skip setting EPP when the saved value is invalid. */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero.  A nonzero
		 * value means one of the following:
		 * - the policy has not changed
		 * - the user has changed it manually
		 * - the EPB could not be read
		 */
		epp = intel_pstate_get_epp(cpu_data, value);
		if (epp)
			goto skip_epp;

		epp = cpu_data->epp_powersave;
	}
	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	} else {
		intel_pstate_set_epb(cpu, epp);
	}
skip_epp:
	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
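
/*
 * Layout of MSR_HWP_REQUEST as manipulated above: bits [7:0] are the
 * minimum performance, [15:8] the maximum performance, [23:16] the
 * desired performance and [31:24] the energy-performance preference.
 */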

static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
	u64 value = READ_ONCE(cpu->hwp_req_cached);
	int min_perf;

	intel_pstate_disable_hwp_interrupt(cpu);

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * In case the EPP has been set to "performance" by the
		 * active mode "performance" scaling algorithm, replace that
		 * temporary value with the cached EPP one.
		 */
		value &= ~GENMASK_ULL(31, 24);
		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
		/*
		 * However, make sure that EPP will be set to "performance" when
		 * the CPU is brought back online again and the "performance"
		 * scaling algorithm is still in effect.
		 */
		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
	}

	/*
	 * Clear the desired perf field in the cached HWP request value to
	 * prevent nonzero desired values from being leaked into the active
	 * mode.
	 */
	value &= ~HWP_DESIRED_PERF(~0L);
	WRITE_ONCE(cpu->hwp_req_cached, value);

	value &= ~GENMASK_ULL(31, 0);
	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));

	/* Set hwp_max = hwp_min */
	value |= HWP_MAX_PERF(min_perf);
	value |= HWP_MIN_PERF(min_perf);

	/* Set EPP to min */
	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
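
/*
 * After intel_pstate_hwp_offline(), the CPU is parked at its lowest
 * performance level (min == max == lowest) with EPP set to maximum
 * energy savings.
 */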

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{
	u64 power_ctl;

	mutex_lock(&intel_pstate_driver_lock);
	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	if (input) {
		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
	} else {
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
	}
	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
	mutex_unlock(&intel_pstate_driver_lock);
}
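
/*
 * Note the inverted polarity above: the MSR bit disables the
 * energy-efficiency optimization, so enabling it means clearing the bit.
 */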

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
	intel_pstate_hwp_enable(cpu);
	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d suspending\n", cpu->cpu);

	cpu->suspended = true;

	/* disable HWP interrupt and cancel any pending work */
	intel_pstate_disable_hwp_interrupt(cpu);

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d resuming\n", cpu->cpu);

	/* Only restore if the system default has been changed */
	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
		set_power_ctl_ee_state(true);
	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
		set_power_ctl_ee_state(false);

	if (cpu->suspended && hwp_active) {
		mutex_lock(&intel_pstate_limits_lock);

		/* Re-enable HWP, because "online" has not done that. */
		intel_pstate_hwp_reenable(cpu);

		mutex_unlock(&intel_pstate_limits_lock);
	}

	cpu->suspended = false;

	return 0;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}

static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
					   struct cpufreq_policy *policy)
{
	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
	refresh_frequency_limits(policy);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);

	cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
	mutex_lock(&intel_pstate_driver_lock);

	update_turbo_state();
	/*
	 * If turbo has been turned on or off globally, policy limits for
	 * all CPUs need to be updated to reflect that.
	 */
	if (global.turbo_disabled_mf != global.turbo_disabled) {
		global.turbo_disabled_mf = global.turbo_disabled;
		arch_set_max_freq_ratio(global.turbo_disabled);
		for_each_possible_cpu(cpu)
			intel_pstate_update_max_freq(cpu);
	} else {
		cpufreq_update_policy(cpu);
	}

	mutex_unlock(&intel_pstate_driver_lock);
}

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_show_status(buf);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	update_turbo_state();
	if (global.turbo_disabled)
		ret = sprintf(buf, "%u\n", global.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (global.turbo_disabled) {
		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	}

	global.no_turbo = clamp_t(int, input, 0, 1);

	if (global.no_turbo) {
		struct cpudata *cpu = all_cpu_data[0];
		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

		/* Squash the global minimum into the permitted range. */
		if (global.min_perf_pct > pct)
			global.min_perf_pct = pct;
	}

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();
	arch_set_max_freq_ratio(global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static void update_qos_request(enum freq_qos_req_type type)
{
	struct freq_qos_request *req;
	struct cpufreq_policy *policy;
	int i;

	for_each_possible_cpu(i) {
		struct cpudata *cpu = all_cpu_data[i];
		unsigned int freq, perf_pct;

		policy = cpufreq_cpu_get(i);
		if (!policy)
			continue;

		req = policy->driver_data;
		cpufreq_cpu_put(policy);

		if (!req)
			continue;

		if (hwp_active)
			intel_pstate_get_hwp_cap(cpu);

		if (type == FREQ_QOS_MIN) {
			perf_pct = global.min_perf_pct;
		} else {
			req++;
			perf_pct = global.max_perf_pct;
		}

		freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);

		if (freq_qos_update_request(req, freq) < 0)
			pr_warn("Failed to update freq constraint: CPU%d\n", i);
	}
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MAX);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.min_perf_pct = clamp_t(int, input,
				      min_perf_pct_min(), global.max_perf_pct);

	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MIN);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hwp_boost);
}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
				       struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 10, &input);
	if (ret)
		return ret;

	mutex_lock(&intel_pstate_driver_lock);
	hwp_boost = !!input;
	intel_pstate_update_policies();
	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
				      char *buf)
{
	u64 power_ctl;
	int enable;

	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
	return sprintf(buf, "%d\n", !enable);
}

static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	bool input;
	int ret;

	ret = kstrtobool(buf, &input);
	if (ret)
		return ret;

	set_power_ctl_ee_state(input);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);

static struct attribute *intel_pstate_attributes[] = {
	&status.attr,
	&no_turbo.attr,
	NULL
};

static const struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];

static struct kobject *intel_pstate_kobject;

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct device *dev_root = bus_get_dev_root(&cpu_subsys);
	int rc;

	if (dev_root) {
		intel_pstate_kobject = kobject_create_and_add("intel_pstate", &dev_root->kobj);
		put_device(dev_root);
	}
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
		WARN_ON(rc);

		rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
		WARN_ON(rc);
	}

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
		WARN_ON(rc);
	}
}

static void __init intel_pstate_sysfs_remove(void)
{
	if (!intel_pstate_kobject)
		return;

	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
		sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
	}

	if (!per_cpu_limits) {
		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);

		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
	}

	kobject_put(intel_pstate_kobject);
}

static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
	int rc;

	if (!hwp_active)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
	WARN_ON_ONCE(rc);
}

static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{
	if (!hwp_active)
		return;

	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
}

/************************** sysfs end ************************/

static void intel_pstate_notify_work(struct work_struct *work)
{
	struct cpudata *cpudata =
		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);

	if (policy) {
		intel_pstate_get_hwp_cap(cpudata);
		__intel_pstate_update_max_freq(cpudata, policy);

		cpufreq_cpu_release(policy);
	}

	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}

static DEFINE_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;

void notify_hwp_interrupt(void)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpudata *cpudata;
	unsigned long flags;
	u64 value;

	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		return;

	rdmsrl_safe(MSR_HWP_STATUS, &value);
	if (!(value & 0x01))
		return;

	spin_lock_irqsave(&hwp_notify_lock, flags);

	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
		goto ack_intr;

	/*
	 * all_cpu_data is currently never freed and cannot be NULL here,
	 * but check anyway to stay safe against future changes.
	 */
	if (unlikely(!READ_ONCE(all_cpu_data)))
		goto ack_intr;

	/*
	 * The per-CPU data is only freed during cleanup when cpufreq
	 * registration fails, and this code cannot run in that case or
	 * after a failed init or status switch, but check anyway to stay
	 * safe against future changes.
	 */
	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
	if (unlikely(!cpudata))
		goto ack_intr;

	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));

	spin_unlock_irqrestore(&hwp_notify_lock, flags);

	return;

ack_intr:
	wrmsrl_safe(MSR_HWP_STATUS, 0);
	spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
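
/*
 * Bit 0 of MSR_HWP_STATUS, tested above, indicates a guaranteed
 * performance change; writing 0 to the MSR acknowledges the event.
 */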

static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
	unsigned long flags;

	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		return;

	/* wrmsrl_on_cpu has to be outside the spinlock as this can result in an IPI */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	spin_lock_irqsave(&hwp_notify_lock, flags);
	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
		cancel_delayed_work(&cpudata->hwp_notify_work);
	spin_unlock_irqrestore(&hwp_notify_lock, flags);
}

static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
	/* Enable HWP notification interrupt for guaranteed performance change */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
		unsigned long flags;

		spin_lock_irqsave(&hwp_notify_lock, flags);
		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
		spin_unlock_irqrestore(&hwp_notify_lock, flags);

		/* wrmsrl_on_cpu has to be outside the spinlock as this can result in an IPI */
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
	}
}

static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
{
	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);

	/*
	 * If the EPP was set by firmware (which means that firmware
	 * enabled HWP), and it:
	 * - is less than or equal to 0x80 (the default balance_perf EPP)
	 * - but less performance-oriented than the performance EPP
	 * then use it as the new balance_perf EPP.
	 */
	if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
	    cpudata->epp_default > HWP_EPP_PERFORMANCE) {
		epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
		return;
	}

	/*
	 * If this CPU generation doesn't call for a change of the
	 * balance_perf EPP, return.
	 */
	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
		return;

	/*
	 * Use the hard coded per-generation value to update the
	 * balance_perf and default EPP.
	 */
	cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
	intel_pstate_set_epp(cpudata, cpudata->epp_default);
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable the HWP notification interrupt until we activate it again */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);

	intel_pstate_enable_hwp_interrupt(cpudata);

	if (cpudata->epp_default >= 0)
		return;

	intel_pstate_update_epp_defaults(cpudata);
}

static int atom_get_min_pstate(int not_used)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(int not_used)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(int not_used)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}
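
/* For example, an MSR_FSB_FREQ field of 1 selects 100000, i.e. 100 MHz per P-state step. */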

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(int cpu)
{
	u64 value;

	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(int cpu)
{
	u64 value;

	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
	return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(int cpu, u64 plat_info)
{
	/* Check how many TDP levels are present */
	if (plat_info & 0x600000000) {
		u64 tdp_ctrl;
		u64 tdp_ratio;
		int tdp_msr;
		int err;

		/* Get the TDP level (0, 1, 2) to get ratios */
		err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* TDP MSRs are contiguous, starting at 0x648 */
		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
		err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
		if (err)
			return err;

		/* For levels 1 and 2, bits[23:16] contain the ratio */
		if (tdp_ctrl & 0x03)
			tdp_ratio >>= 16;

		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

		return (int)tdp_ratio;
	}

	return -ENXIO;
}
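
/*
 * The 0x600000000 mask above covers MSR_PLATFORM_INFO bits 34:33, which
 * advertise how many additional ConfigTDP levels the processor supports.
 */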

static int core_get_max_pstate(int cpu)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int tdp_ratio;
	int err;

	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
	if (tdp_ratio <= 0)
		return max_pstate;

	if (hwp_active) {
		/* Turbo activation ratio is not used on HWP platforms */
		return tdp_ratio;
	}

	err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		int tar_levels;

		/* Do some sanity checking for safety */
		tar_levels = tar & 0xff;
		if (tdp_ratio - 1 == tar_levels) {
			max_pstate = tar_levels;
			pr_debug("max_pstate=TAC %x\n", max_pstate);
		}
	}

	return max_pstate;
}

static int core_get_turbo_pstate(int cpu)
{
	u64 value;
	int nont, ret;

	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
	nont = core_get_max_pstate(cpu);
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}
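
/*
 * Bit 32 in the PERF_CTL value is the turbo disengage bit: it is only set
 * when turbo has been disabled via sysfs while still being available, so
 * the request stays within the non-turbo range.
 */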

static int knl_get_aperf_mperf_shift(void)
{
	return 10;
}
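
/*
 * On Knights Landing the APERF and MPERF counters advance at rates that
 * differ by a factor of 2^10, which the shift above compensates for when
 * samples are processed.
 */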

static int knl_get_turbo_pstate(int cpu)
{
	u64 value;
	int nont, ret;

	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
	nont = core_get_max_pstate(cpu);
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
1984
1985static void hybrid_get_type(void *data)
1986{
1987 u8 *cpu_type = data;
1988
1989 *cpu_type = get_this_hybrid_cpu_type();
1990}
1991
1992static int hwp_get_cpu_scaling(int cpu)
1993{
1994 u8 cpu_type = 0;
1995
1996 smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
1997 /* P-cores have a smaller perf level-to-freqency scaling factor. */
1998 if (cpu_type == 0x40)
1999 return hybrid_scaling_factor;
2000
2001 /* Use default core scaling for E-cores */
2002 if (cpu_type == 0x20)
2003 return core_get_scaling();
2004
2005 /*
2006 * If reached here, this system is either non-hybrid (like Tiger
2007 * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
2008 * no E cores (in which case CPUID for hybrid support is 0).
2009 *
2010 * The CPPC nominal_frequency field is 0 for non-hybrid systems,
2011 * so the default core scaling will be used for them.
2012 */
2013 return intel_pstate_cppc_get_scaling(cpu);
2014}
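
/*
 * Example of what the returned scaling factor means (made-up numbers):
 * freq = pstate * scaling, with freq in kHz. On a hybrid part with a
 * hypothetical hybrid_scaling_factor of 78741, a P-core HWP level of 32
 * maps to 32 * 78741 = 2519712 kHz (~2.5 GHz), while an E-core level of
 * 32 with the default 100000 scaling maps to 3200000 kHz (3.2 GHz).
 */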

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
	int perf_ctl_scaling = pstate_funcs.get_scaling();

	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

	if (hwp_active && !hwp_mode_bdw) {
		__intel_pstate_get_hwp_cap(cpu);

		if (pstate_funcs.get_cpu_scaling) {
			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
			if (cpu->pstate.scaling != perf_ctl_scaling)
				intel_pstate_hybrid_hwp_adjust(cpu);
		} else {
			cpu->pstate.scaling = perf_ctl_scaling;
		}
	} else {
		cpu->pstate.scaling = perf_ctl_scaling;
		cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
	}

	if (cpu->pstate.scaling == perf_ctl_scaling) {
		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * A long hold time keeps the high performance limits in place for a long
 * time, which negatively impacts performance per watt for some workloads,
 * like SPECpower. The 3 ms value is based on experiments with some
 * workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (user changes via sysfs or at boot time):
	 * If P0 (turbo max) = P1 (guaranteed max) = min:
	 *	No boost, return.
	 * If P0 (turbo max) > P1 (guaranteed max) = min:
	 *	Should result in one level boost only for P0.
	 * If P0 (turbo max) = P1 (guaranteed max) > min:
	 *	Should result in two level boost:
	 *	(min + P1)/2 and P1.
	 * If P0 (turbo max) > P1 (guaranteed max) > min:
	 *	Should result in three level boost:
	 *	(min + P1)/2, P1 and P0.
	 */

	/* If max and min are equal or already at max, nothing to boost */
	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
		return;

	if (!cpu->hwp_boost_min)
		cpu->hwp_boost_min = min_limit;

	/* level at the halfway mark between min and guaranteed */
	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;

	if (cpu->hwp_boost_min < boost_level1)
		cpu->hwp_boost_min = boost_level1;
	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
		 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = max_limit;
	else
		return;

	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
	wrmsrl(MSR_HWP_REQUEST, hwp_req);
	cpu->last_update = cpu->sample.time;
}
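
/*
 * Worked example of the boost staircase above (illustrative numbers):
 * with min = 8, guaranteed (P1) = 24 and turbo max (P0) = 32, each
 * qualifying IO wakeup moves hwp_boost_min one step along
 *
 *	(8 + 24) / 2 = 16  ->  24 (P1)  ->  32 (P0)
 *
 * which is the three-level boost case described in the comment block
 * inside intel_pstate_hwp_boost_up().
 */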

static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{
	if (cpu->hwp_boost_min) {
		bool expired;

		/* Check if we have been idle long enough to boost down */
		expired = time_after64(cpu->sample.time, cpu->last_update +
				       hwp_boost_hold_time_ns);
		if (expired) {
			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
			cpu->hwp_boost_min = 0;
		}
	}
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
						      u64 time)
{
	cpu->sample.time = time;

	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
		bool do_io = false;

		cpu->sched_flags = 0;
		/*
		 * Set the iowait_boost flag and update the time. Since the
		 * IO WAIT flag is set all the time, a single occurrence is
		 * not enough to conclude that some IO-bound activity is
		 * scheduled on this CPU. Only if at least two of them
		 * arrive in two consecutive ticks is the CPU treated as a
		 * boost candidate.
		 */
		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
			do_io = true;

		cpu->last_io_update = time;

		if (do_io)
			intel_pstate_hwp_boost_up(cpu);

	} else {
		intel_pstate_hwp_boost_down(cpu);
	}
}

static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
						u64 time, unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);

	cpu->sched_flags |= flags;

	if (smp_processor_id() == cpu->cpu)
		intel_pstate_update_util_hwp_local(cpu, time);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * The first time this function is invoked in a given cycle, all of
	 * the previous sample data fields are equal to zero or stale and
	 * they must be populated with meaningful numbers for things to work,
	 * so assume that sample.time will always be reset before setting the
	 * utilization update hook and make the caller skip the sample then.
	 */
	if (cpu->last_sample_time) {
		intel_pstate_calc_avg_perf(cpu);
		return true;
	}
	return false;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}
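
/*
 * Numeric sketch of the extended fixed-point math above (illustrative):
 * with aperf = 3000 and mperf = 2000 deltas over a sample period,
 * core_avg_perf = div_ext_fp(3000, 2000) encodes 1.5 in the 8.14
 * fixed-point format, so on a part with cpu_khz = 2000000 (2 GHz base
 * clock) get_avg_frequency() returns mul_ext_fp(core_avg_perf, 2000000)
 * = 3000000 kHz, i.e. an average of 3 GHz.
 */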

static inline int32_t get_target_pstate(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
			   sample->tsc);

	if (busy_frac < cpu->iowait_boost)
		busy_frac = cpu->iowait_boost;

	sample->busy_scaled = busy_frac * 100;

	target = global.no_turbo || global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
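
/*
 * Worked example for get_target_pstate() (illustrative numbers): with
 * turbo available, turbo_pstate = 32 and a 60% busy sample (busy_frac =
 * 0.6 in fixed point, no iowait boost):
 *
 *	target = 32 + (32 >> 2);		// 40, i.e. 25% headroom
 *	target = mul_fp(target, busy_frac);	// 40 * 0.6 = 24
 *
 * and if the average P-state over the previous cycle was 28, half of
 * the difference is added back: 24 + ((28 - 24) >> 1) = 26.
 */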

static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
	int max_pstate = max(min_pstate, cpu->max_perf_ratio);

	return clamp_t(int, pstate, min_pstate, max_pstate);
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{
	int from = cpu->pstate.current_pstate;
	struct sample *sample;
	int target_pstate;

	update_turbo_state();

	target_pstate = get_target_pstate(cpu);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
			    fp_toint(sample->busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    get_avg_frequency(cpu),
			    fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	/* Don't allow remote callbacks */
	if (smp_processor_id() != cpu->cpu)
		return;

	delta_ns = time - cpu->last_update;
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* Start over if the CPU may have been idle. */
		if (delta_ns > TICK_NSEC) {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
			cpu->iowait_boost <<= 1;
			if (cpu->iowait_boost > int_tofp(1))
				cpu->iowait_boost = int_tofp(1);
		} else {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		}
	} else if (cpu->iowait_boost) {
		/* Clear iowait_boost if the CPU may have been idle. */
		if (delta_ns > TICK_NSEC)
			cpu->iowait_boost = 0;
		else
			cpu->iowait_boost >>= 1;
	}
	cpu->last_update = time;
	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
		return;

	if (intel_pstate_sample(cpu, time))
		intel_pstate_adjust_pstate(cpu);
}
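
/*
 * Illustrative timeline for the iowait boost logic above: consecutive
 * SCHED_CPUFREQ_IOWAIT updates double iowait_boost through
 * 1/8 -> 1/4 -> 1/2 -> 1 (full scale), non-IO updates halve it back
 * down, and a gap longer than TICK_NSEC either clears it or restarts
 * it from 1/8.
 */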

static struct pstate_funcs core_funcs = {
	.get_max = core_get_max_pstate,
	.get_max_physical = core_get_max_pstate_physical,
	.get_min = core_get_min_pstate,
	.get_turbo = core_get_turbo_pstate,
	.get_scaling = core_get_scaling,
	.get_val = core_get_val,
};

static const struct pstate_funcs silvermont_funcs = {
	.get_max = atom_get_max_pstate,
	.get_max_physical = atom_get_max_pstate,
	.get_min = atom_get_min_pstate,
	.get_turbo = atom_get_turbo_pstate,
	.get_val = atom_get_val,
	.get_scaling = silvermont_get_scaling,
	.get_vid = atom_get_vid,
};

static const struct pstate_funcs airmont_funcs = {
	.get_max = atom_get_max_pstate,
	.get_max_physical = atom_get_max_pstate,
	.get_min = atom_get_min_pstate,
	.get_turbo = atom_get_turbo_pstate,
	.get_val = atom_get_val,
	.get_scaling = airmont_get_scaling,
	.get_vid = atom_get_vid,
};

static const struct pstate_funcs knl_funcs = {
	.get_max = core_get_max_pstate,
	.get_max_physical = core_get_max_pstate_physical,
	.get_min = core_get_min_pstate,
	.get_turbo = knl_get_turbo_pstate,
	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
	.get_scaling = core_get_scaling,
	.get_val = core_get_val,
};

#define X86_MATCH(model, policy) \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_APERFMPERF, &policy)

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	X86_MATCH(SANDYBRIDGE, core_funcs),
	X86_MATCH(SANDYBRIDGE_X, core_funcs),
	X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
	X86_MATCH(IVYBRIDGE, core_funcs),
	X86_MATCH(HASWELL, core_funcs),
	X86_MATCH(BROADWELL, core_funcs),
	X86_MATCH(IVYBRIDGE_X, core_funcs),
	X86_MATCH(HASWELL_X, core_funcs),
	X86_MATCH(HASWELL_L, core_funcs),
	X86_MATCH(HASWELL_G, core_funcs),
	X86_MATCH(BROADWELL_G, core_funcs),
	X86_MATCH(ATOM_AIRMONT, airmont_funcs),
	X86_MATCH(SKYLAKE_L, core_funcs),
	X86_MATCH(BROADWELL_X, core_funcs),
	X86_MATCH(SKYLAKE, core_funcs),
	X86_MATCH(BROADWELL_D, core_funcs),
	X86_MATCH(XEON_PHI_KNL, knl_funcs),
	X86_MATCH(XEON_PHI_KNM, knl_funcs),
	X86_MATCH(ATOM_GOLDMONT, core_funcs),
	X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
	X86_MATCH(SKYLAKE_X, core_funcs),
	X86_MATCH(COMETLAKE, core_funcs),
	X86_MATCH(ICELAKE_X, core_funcs),
	X86_MATCH(TIGERLAKE, core_funcs),
	X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
	X86_MATCH(EMERALDRAPIDS_X, core_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(BROADWELL_D, core_funcs),
	X86_MATCH(BROADWELL_X, core_funcs),
	X86_MATCH(SKYLAKE_X, core_funcs),
	X86_MATCH(ICELAKE_X, core_funcs),
	X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	X86_MATCH(KABYLAKE, core_funcs),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		WRITE_ONCE(all_cpu_data[cpunum], cpu);

		cpu->cpu = cpunum;

		cpu->epp_default = -EINVAL;

		if (hwp_active) {
			intel_pstate_hwp_enable(cpu);

			if (intel_pstate_acpi_pm_profile_server())
				hwp_boost = true;
		}
	} else if (hwp_active) {
		/*
		 * Re-enable HWP in case this happens after a resume from ACPI
		 * S3 if the CPU was offline during the whole system
		 * suspend/resume cycle.
		 */
		intel_pstate_hwp_reenable(cpu);
	}

	cpu->epp_powersave = -EINVAL;
	cpu->epp_policy = 0;

	intel_pstate_get_cpu_pstates(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (hwp_active && !hwp_boost)
		return;

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     (hwp_active ?
				      intel_pstate_update_util_hwp :
				      intel_pstate_update_util));
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_rcu();
}

static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
	return global.turbo_disabled || global.no_turbo ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}

static void intel_pstate_update_perf_limits(struct cpudata *cpu,
					    unsigned int policy_min,
					    unsigned int policy_max)
{
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int32_t max_policy_perf, min_policy_perf;

	max_policy_perf = policy_max / perf_ctl_scaling;
	if (policy_max == policy_min) {
		min_policy_perf = max_policy_perf;
	} else {
		min_policy_perf = policy_min / perf_ctl_scaling;
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	/*
	 * HWP needs some special consideration, because HWP_REQUEST uses
	 * abstract values to represent performance rather than pure ratios.
	 */
	if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
		int freq;

		freq = max_policy_perf * perf_ctl_scaling;
		max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
		freq = min_policy_perf * perf_ctl_scaling;
		min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
	}

	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
		 cpu->cpu, min_policy_perf, max_policy_perf);

	/* Normalize user input to [min_perf, max_perf] */
	if (per_cpu_limits) {
		cpu->min_perf_ratio = min_policy_perf;
		cpu->max_perf_ratio = max_policy_perf;
	} else {
		int turbo_max = cpu->pstate.turbo_pstate;
		int32_t global_min, global_max;

		/* Global limits are in percent of the maximum turbo P-state. */
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);

		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);
	}
	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}
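
/*
 * Worked example for the global-limits branch above (illustrative):
 * with turbo_pstate = 40, max_perf_pct = 75 and min_perf_pct = 25,
 *
 *	global_max = DIV_ROUND_UP(40 * 75, 100);	// 30
 *	global_min = DIV_ROUND_UP(40 * 25, 100);	// 10
 *
 * so the effective per-CPU ratios are the policy limits clamped into
 * the [10, 30] range.
 */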

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active) {
		/*
		 * If hwp_boost was active before and has been turned off
		 * dynamically in the meantime, the update util hook needs
		 * to be cleared here.
		 */
		if (!hwp_boost)
			intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_hwp_set(policy->cpu);
	}
	/*
	 * policy->cur is never updated by the intel_pstate driver, but it
	 * is used as a stale frequency value, so keep it within limits.
	 */
	policy->cur = policy->min;

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	if (!hwp_active &&
	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}

static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	int max_freq;

	update_turbo_state();
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);
		max_freq = global.no_turbo || global.turbo_disabled ?
				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	} else {
		max_freq = intel_pstate_get_max_freq(cpu);
	}
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);

	intel_pstate_adjust_policy_max(cpu, policy);
}

static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);

	return 0;
}

static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d going offline\n", cpu->cpu);

	if (cpu->suspended)
		return 0;

	/*
	 * If the CPU is an SMT thread and it goes offline with the performance
	 * settings different from the minimum, it will prevent its sibling
	 * from getting to lower performance levels, so force the minimum
	 * performance on CPU offline to prevent that from happening.
	 */
	if (hwp_active)
		intel_pstate_hwp_offline(cpu);
	else
		intel_pstate_set_min_pstate(cpu);

	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d going online\n", cpu->cpu);

	intel_pstate_init_acpi_perf_limits(policy);

	if (hwp_active) {
		/*
		 * Re-enable HWP and clear the "suspended" flag to let "resume"
		 * know that it need not do that.
		 */
		intel_pstate_hwp_reenable(cpu);
		cpu->suspended = false;
	}

	return 0;
}

static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
	intel_pstate_clear_update_util_hook(policy->cpu);

	return intel_cpufreq_cpu_offline(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf_ratio = 0xFF;
	cpu->min_perf_ratio = 0;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
	update_turbo_state();
	global.turbo_disabled_mf = global.turbo_disabled;
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;

	intel_pstate_init_acpi_perf_limits(policy);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	/*
	 * Set the policy to powersave to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	policy->policy = CPUFREQ_POLICY_POWERSAVE;

	if (hwp_active) {
		struct cpudata *cpu = all_cpu_data[policy->cpu];

		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
	}

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_suspend,
	.resume		= intel_pstate_resume,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.offline	= intel_pstate_cpu_offline,
	.online		= intel_pstate_cpu_online,
	.update_limits	= intel_pstate_update_limits,
	.name		= "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	intel_pstate_verify_cpu_policy(cpu, policy);
	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	return 0;
}

/* Use of trace in passive mode:
 *
 * In passive mode the trace core_busy field (also known as the
 * performance field, and labelled as such on the graphs; also known as
 * core_avg_perf) is not needed and so is re-assigned to indicate if the
 * driver call was via the normal or fast switch path. Various graphs
 * output from the intel_pstate_tracer.py utility that include core_busy
 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
 * so we use 10 to indicate the normal path through the driver, and
 * 90 to indicate the fast switch path through the driver.
 * The scaled_busy field is not used, and is set to 0.
 */

#define INTEL_PSTATE_TRACE_TARGET	10
#define INTEL_PSTATE_TRACE_FAST_SWITCH	90

static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
	struct sample *sample;

	if (!trace_pstate_sample_enabled())
		return;

	if (!intel_pstate_sample(cpu, ktime_get()))
		return;

	sample = &cpu->sample;
	trace_pstate_sample(trace_type,
			    0,
			    old_pstate,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    get_avg_frequency(cpu),
			    fp_toint(cpu->iowait_boost * 100));
}

static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
				     u32 desired, bool fast_switch)
{
	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	value &= ~HWP_DESIRED_PERF(~0L);
	value |= HWP_DESIRED_PERF(desired);

	if (value == prev)
		return;

	WRITE_ONCE(cpu->hwp_req_cached, value);
	if (fast_switch)
		wrmsrl(MSR_HWP_REQUEST, value);
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
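
/*
 * Sketch of the read-modify-write above (illustrative values): the
 * HWP_MIN_PERF(), HWP_MAX_PERF() and HWP_DESIRED_PERF() macros place
 * their arguments in bits 7:0, 15:8 and 23:16 of HWP_REQUEST
 * respectively, so updating a cached value to min = 8, max = 32,
 * desired = 16 amounts to
 *
 *	value = (prev & ~0xffffffULL) | (16 << 16) | (32 << 8) | 8;
 *
 * and the MSR write is skipped entirely when nothing changed.
 */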

static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
					  u32 target_pstate, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_IA32_PERF_CTL,
		       pstate_funcs.get_val(cpu, target_pstate));
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
}

static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
				       int target_pstate, bool fast_switch)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int old_pstate = cpu->pstate.current_pstate;

	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (hwp_active) {
		int max_pstate = policy->strict_target ?
					target_pstate : cpu->max_perf_ratio;

		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
					 fast_switch);
	} else if (target_pstate != old_pstate) {
		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
	}

	cpu->pstate.current_pstate = target_pstate;

	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
			    INTEL_PSTATE_TRACE_TARGET, old_pstate);

	return target_pstate;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);

	target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);

	freqs.new = target_pstate * cpu->pstate.scaling;

	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);

	return target_pstate * cpu->pstate.scaling;
}

static void intel_cpufreq_adjust_perf(unsigned int cpunum,
				      unsigned long min_perf,
				      unsigned long target_perf,
				      unsigned long capacity)
{
	struct cpudata *cpu = all_cpu_data[cpunum];
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	int old_pstate = cpu->pstate.current_pstate;
	int cap_pstate, min_pstate, max_pstate, target_pstate;

	update_turbo_state();
	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
					     HWP_HIGHEST_PERF(hwp_cap);

	/* Optimization: Avoid unnecessary divisions. */

	target_pstate = cap_pstate;
	if (target_perf < capacity)
		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);

	min_pstate = cap_pstate;
	if (min_perf < capacity)
		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);

	if (min_pstate < cpu->pstate.min_pstate)
		min_pstate = cpu->pstate.min_pstate;

	if (min_pstate < cpu->min_perf_ratio)
		min_pstate = cpu->min_perf_ratio;

	if (min_pstate > cpu->max_perf_ratio)
		min_pstate = cpu->max_perf_ratio;

	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
	if (max_pstate < min_pstate)
		max_pstate = min_pstate;

	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);

	intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);

	cpu->pstate.current_pstate = target_pstate;
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
}
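
/*
 * Worked example for the perf-to-P-state mapping above (illustrative):
 * with hwp_cap reporting a highest perf of 40 and turbo enabled, a
 * request of target_perf = 512 out of capacity = 1024 gives
 *
 *	target_pstate = DIV_ROUND_UP(40 * 512, 1024);	// 20
 *
 * which is then clamped into [min_pstate, max_pstate] before being
 * written out as the desired performance level.
 */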

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;
	struct cpudata *cpu;
	struct device *dev;
	int ret, freq;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	ret = __intel_pstate_cpu_init(policy);
	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto pstate_exit;
	}

	cpu = all_cpu_data[policy->cpu];

	if (hwp_active) {
		u64 value;

		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;

		intel_pstate_get_hwp_cap(cpu);

		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
		WRITE_ONCE(cpu->hwp_req_cached, value);

		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
	} else {
		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
	}

	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);

	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
				   freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_req;
	}

	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);

	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
				   freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto remove_min_req;
	}

	policy->driver_data = req;

	return 0;

remove_min_req:
	freq_qos_remove_request(req);
free_req:
	kfree(req);
pstate_exit:
	intel_pstate_exit_perf_limits(policy);

	return ret;
}

static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;

	req = policy->driver_data;

	freq_qos_remove_request(req + 1);
	freq_qos_remove_request(req);
	kfree(req);

	return intel_pstate_cpu_exit(policy);
}

static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
{
	intel_pstate_suspend(policy);

	if (hwp_active) {
		struct cpudata *cpu = all_cpu_data[policy->cpu];
		u64 value = READ_ONCE(cpu->hwp_req_cached);

		/*
		 * Clear the desired perf field in MSR_HWP_REQUEST in case
		 * intel_cpufreq_adjust_perf() is in use and the last value
		 * written by it may not be suitable.
		 */
		value &= ~HWP_DESIRED_PERF(~0L);
		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
		WRITE_ONCE(cpu->hwp_req_cached, value);
	}

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_cpufreq_cpu_exit,
	.offline	= intel_cpufreq_cpu_offline,
	.online		= intel_pstate_cpu_online,
	.suspend	= intel_cpufreq_suspend,
	.resume		= intel_pstate_resume,
	.update_limits	= intel_pstate_update_limits,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *default_driver;

static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			spin_lock(&hwp_notify_lock);
			kfree(all_cpu_data[cpu]);
			WRITE_ONCE(all_cpu_data[cpu], NULL);
			spin_unlock(&hwp_notify_lock);
		}
	}
	cpus_read_unlock();

	intel_pstate_driver = NULL;
}

static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
	int ret;

	if (driver == &intel_pstate)
		intel_pstate_sysfs_expose_hwp_dynamic_boost();

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	global.min_perf_pct = min_perf_pct_min();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!intel_pstate_driver)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}

static int intel_pstate_update_status(const char *buf, size_t size)
{
	if (size == 3 && !strncmp(buf, "off", size)) {
		if (!intel_pstate_driver)
			return -EINVAL;

		if (hwp_active)
			return -EBUSY;

		cpufreq_unregister_driver(intel_pstate_driver);
		intel_pstate_driver_cleanup();
		return 0;
	}

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			cpufreq_unregister_driver(intel_pstate_driver);
		}

		return intel_pstate_register_driver(&intel_pstate);
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_cpufreq)
				return 0;

			cpufreq_unregister_driver(intel_pstate_driver);
			intel_pstate_sysfs_hide_hwp_dynamic_boost();
		}

		return intel_pstate_register_driver(&intel_cpufreq);
	}

	return -EINVAL;
}
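
/*
 * The strings parsed above back the driver's "status" sysfs attribute,
 * so the operating mode can be switched at run time, e.g. (assuming the
 * usual sysfs mount point):
 *
 *	# echo passive > /sys/devices/system/cpu/intel_pstate/status
 *
 * Note that switching to "off" is rejected while HWP is active.
 */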

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max(0) ||
	    !pstate_funcs.get_min(0) ||
	    !pstate_funcs.get_turbo(0))
		return -ENODEV;

	return 0;
}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	pr_debug("ACPI _PSS not found\n");
	return true;
}

static bool __init intel_pstate_no_acpi_pcch(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		goto not_found;

	if (acpi_has_method(handle, "PCCH"))
		return false;

not_found:
	pr_debug("ACPI PCCH not found\n");
	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	pr_debug("ACPI _PPC not found\n");
	return false;
}

enum {
	PSS,
	PPC,
};

/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{ } /* End */
};

#define BITMASK_OOB (BIT(8) | BIT(18))

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	const struct x86_cpu_id *id;
	u64 misc_pwr;
	int idx;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & BITMASK_OOB) {
			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
			return true;
		}
	}

	idx = acpi_match_platform_list(plat_info);
	if (idx < 0)
		return false;

	switch (plat_info[idx].data) {
	case PSS:
		if (!intel_pstate_no_acpi_pss())
			return false;

		return intel_pstate_no_acpi_pcch();
	case PPC:
		return intel_pstate_has_acpi_ppc() && !force_load;
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

#define INTEL_PSTATE_HWP_BROADWELL	0x01

#define X86_MATCH_HWP(model, hwp_mode) \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_HWP, hwp_mode)

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(ANY, 0),
	{}
};

static bool intel_pstate_hwp_is_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);
	return !!(value & 0x1);
}

static const struct x86_cpu_id intel_epp_balance_perf[] = {
	/*
	 * Set the EPP value to 102, the maximum suggested EPP that can
	 * still result in one-core turbo frequency for Alder Lake
	 * mobile CPUs.
	 */
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
	{}
};

static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
	{}
};

static int __init intel_pstate_init(void)
{
	static struct cpudata **_all_cpu_data;
	const struct x86_cpu_id *id;
	int rc;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	id = x86_match_cpu(hwp_support_ids);
	if (id) {
		hwp_forced = intel_pstate_hwp_is_enabled();

		if (hwp_forced)
			pr_info("HWP enabled by BIOS\n");
		else if (no_load)
			return -ENODEV;

		copy_cpu_funcs(&core_funcs);
		/*
		 * Avoid enabling HWP for processors without EPP support,
		 * because that means incomplete HWP implementation which is a
		 * corner case and supporting it is generally problematic.
		 *
		 * If HWP is enabled already, though, there is no choice but to
		 * deal with it.
		 */
		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
			WRITE_ONCE(hwp_active, 1);
			hwp_mode_bdw = id->driver_data;
			intel_pstate.attr = hwp_cpufreq_attrs;
			intel_cpufreq.attr = hwp_cpufreq_attrs;
			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
			if (!default_driver)
				default_driver = &intel_pstate;

			pstate_funcs.get_cpu_scaling = hwp_get_cpu_scaling;

			goto hwp_cpu_matched;
		}
		pr_info("HWP not enabled\n");
	} else {
		if (no_load)
			return -ENODEV;

		id = x86_match_cpu(intel_pstate_cpu_ids);
		if (!id) {
			pr_info("CPU model not supported\n");
			return -ENODEV;
		}

		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
	}

	if (intel_pstate_msrs_not_valid()) {
		pr_info("Invalid MSRs\n");
		return -ENODEV;
	}
	/* Without HWP start in the passive mode. */
	if (!default_driver)
		default_driver = &intel_cpufreq;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists()) {
		pr_info("P-states controlled by the platform\n");
		return -ENODEV;
	}

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	_all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
	if (!_all_cpu_data)
		return -ENOMEM;

	WRITE_ONCE(all_cpu_data, _all_cpu_data);

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	if (hwp_active) {
		const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
		const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);

		if (id)
			epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;

		if (hybrid_id) {
			hybrid_scaling_factor = hybrid_id->driver_data;
			pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor);
		}
	}

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver(default_driver);
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc) {
		intel_pstate_sysfs_remove();
		return rc;
	}

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id) {
			set_power_ctl_ee_state(false);
			pr_info("Disabling energy efficiency optimization\n");
		}

		pr_info("HWP enabled\n");
	} else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
	}

	return 0;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	else if (!strcmp(str, "active"))
		default_driver = &intel_pstate;
	else if (!strcmp(str, "passive"))
		default_driver = &intel_cpufreq;

	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;

	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
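
/*
 * Boot-time usage examples for the option parsed above:
 *
 *	intel_pstate=disable
 *	intel_pstate=passive
 *	intel_pstate=per_cpu_perf_limits
 *
 * intel_pstate_setup() compares whole strings, so comma-separated
 * combinations would not match here; each value is expected as its own
 * intel_pstate= parameter.
 */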

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * intel_pstate.c: Native P state management for Intel processors
4 *
5 * (C) Copyright 2012 Intel Corporation
6 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/kernel_stat.h>
13#include <linux/module.h>
14#include <linux/ktime.h>
15#include <linux/hrtimer.h>
16#include <linux/tick.h>
17#include <linux/slab.h>
18#include <linux/sched/cpufreq.h>
19#include <linux/list.h>
20#include <linux/cpu.h>
21#include <linux/cpufreq.h>
22#include <linux/sysfs.h>
23#include <linux/types.h>
24#include <linux/fs.h>
25#include <linux/acpi.h>
26#include <linux/vmalloc.h>
27#include <linux/pm_qos.h>
28#include <trace/events/power.h>
29
30#include <asm/div64.h>
31#include <asm/msr.h>
32#include <asm/cpu_device_id.h>
33#include <asm/cpufeature.h>
34#include <asm/intel-family.h>
35
36#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
37
38#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
39#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000
40#define INTEL_CPUFREQ_TRANSITION_DELAY 500
41
42#ifdef CONFIG_ACPI
43#include <acpi/processor.h>
44#include <acpi/cppc_acpi.h>
45#endif
46
47#define FRAC_BITS 8
48#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
49#define fp_toint(X) ((X) >> FRAC_BITS)
50
51#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
52
53#define EXT_BITS 6
54#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
55#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
56#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
57
58static inline int32_t mul_fp(int32_t x, int32_t y)
59{
60 return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
61}
62
63static inline int32_t div_fp(s64 x, s64 y)
64{
65 return div64_s64((int64_t)x << FRAC_BITS, y);
66}
67
68static inline int ceiling_fp(int32_t x)
69{
70 int mask, ret;
71
72 ret = fp_toint(x);
73 mask = (1 << FRAC_BITS) - 1;
74 if (x & mask)
75 ret += 1;
76 return ret;
77}
78
79static inline u64 mul_ext_fp(u64 x, u64 y)
80{
81 return (x * y) >> EXT_FRAC_BITS;
82}
83
84static inline u64 div_ext_fp(u64 x, u64 y)
85{
86 return div64_u64(x << EXT_FRAC_BITS, y);
87}
88
89/**
90 * struct sample - Store performance sample
91 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average
92 * performance during last sample period
93 * @busy_scaled: Scaled busy value which is used to calculate next
94 * P state. This can be different than core_avg_perf
95 * to account for cpu idle period
96 * @aperf: Difference of actual performance frequency clock count
97 * read from APERF MSR between last and current sample
98 * @mperf: Difference of maximum performance frequency clock count
99 * read from MPERF MSR between last and current sample
100 * @tsc: Difference of time stamp counter between last and
101 * current sample
102 * @time: Current time from scheduler
103 *
104 * This structure is used in the cpudata structure to store performance sample
105 * data for choosing next P State.
106 */
107struct sample {
108 int32_t core_avg_perf;
109 int32_t busy_scaled;
110 u64 aperf;
111 u64 mperf;
112 u64 tsc;
113 u64 time;
114};
115
116/**
117 * struct pstate_data - Store P state data
118 * @current_pstate: Current requested P state
119 * @min_pstate: Min P state possible for this platform
120 * @max_pstate: Max P state possible for this platform
121 * @max_pstate_physical:This is physical Max P state for a processor
122 * This can be higher than the max_pstate which can
123 * be limited by platform thermal design power limits
124 * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor
125 * @scaling: Scaling factor between performance and frequency
126 * @turbo_pstate: Max Turbo P state possible for this platform
127 * @min_freq: @min_pstate frequency in cpufreq units
128 * @max_freq: @max_pstate frequency in cpufreq units
129 * @turbo_freq: @turbo_pstate frequency in cpufreq units
130 *
131 * Stores the per cpu model P state limits and current P state.
132 */
133struct pstate_data {
134 int current_pstate;
135 int min_pstate;
136 int max_pstate;
137 int max_pstate_physical;
138 int perf_ctl_scaling;
139 int scaling;
140 int turbo_pstate;
141 unsigned int min_freq;
142 unsigned int max_freq;
143 unsigned int turbo_freq;
144};
145
146/**
147 * struct vid_data - Stores voltage information data
148 * @min: VID data for this platform corresponding to
149 * the lowest P state
150 * @max: VID data corresponding to the highest P State.
151 * @turbo: VID data for turbo P state
152 * @ratio: Ratio of (vid max - vid min) /
153 * (max P state - Min P State)
154 *
155 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
156 * This data is used in Atom platforms, where in addition to target P state,
157 * the voltage data needs to be specified to select next P State.
158 */
159struct vid_data {
160 int min;
161 int max;
162 int turbo;
163 int32_t ratio;
164};
165
166/**
167 * struct global_params - Global parameters, mostly tunable via sysfs.
168 * @no_turbo: Whether or not to use turbo P-states.
169 * @turbo_disabled: Whether or not turbo P-states are available at all,
170 * based on the MSR_IA32_MISC_ENABLE value and whether or
171 * not the maximum reported turbo P-state is different from
172 * the maximum reported non-turbo one.
173 * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
174 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
175 * P-state capacity.
176 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
177 * P-state capacity.
178 */
179struct global_params {
180 bool no_turbo;
181 bool turbo_disabled;
182 bool turbo_disabled_mf;
183 int max_perf_pct;
184 int min_perf_pct;
185};
186
187/**
188 * struct cpudata - Per CPU instance data storage
189 * @cpu: CPU number for this instance data
190 * @policy: CPUFreq policy value
191 * @update_util: CPUFreq utility callback information
192 * @update_util_set: CPUFreq utility callback is set
193 * @iowait_boost: iowait-related boost fraction
194 * @last_update: Time of the last update.
195 * @pstate: Stores P state limits for this CPU
196 * @vid: Stores VID limits for this CPU
197 * @last_sample_time: Last Sample time
198 * @aperf_mperf_shift: APERF vs MPERF counting frequency difference
199 * @prev_aperf: Last APERF value read from APERF MSR
200 * @prev_mperf: Last MPERF value read from MPERF MSR
201 * @prev_tsc: Last timestamp counter (TSC) value
202 * @prev_cummulative_iowait: IO Wait time difference from last and
203 * current sample
204 * @sample: Storage for storing last Sample data
205 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
206 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
207 * @acpi_perf_data: Stores ACPI perf information read from _PSS
208 * @valid_pss_table: Set to true for valid ACPI _PSS entries found
209 * @epp_powersave: Last saved HWP energy performance preference
210 * (EPP) or energy performance bias (EPB),
211 * when policy switched to performance
212 * @epp_policy: Last saved policy used to set EPP/EPB
213 * @epp_default: Power on default HWP energy performance
214 * preference/bias
215 * @epp_cached Cached HWP energy-performance preference value
216 * @hwp_req_cached: Cached value of the last HWP Request MSR
217 * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
218 * @last_io_update: Last time when IO wake flag was set
219 * @sched_flags: Store scheduler flags for possible cross CPU update
220 * @hwp_boost_min: Last HWP boosted min performance
221 * @suspended: Whether or not the driver has been suspended.
222 *
223 * This structure stores per CPU instance data for all CPUs.
224 */
225struct cpudata {
226 int cpu;
227
228 unsigned int policy;
229 struct update_util_data update_util;
230 bool update_util_set;
231
232 struct pstate_data pstate;
233 struct vid_data vid;
234
235 u64 last_update;
236 u64 last_sample_time;
237 u64 aperf_mperf_shift;
238 u64 prev_aperf;
239 u64 prev_mperf;
240 u64 prev_tsc;
241 u64 prev_cummulative_iowait;
242 struct sample sample;
243 int32_t min_perf_ratio;
244 int32_t max_perf_ratio;
245#ifdef CONFIG_ACPI
246 struct acpi_processor_performance acpi_perf_data;
247 bool valid_pss_table;
248#endif
249 unsigned int iowait_boost;
250 s16 epp_powersave;
251 s16 epp_policy;
252 s16 epp_default;
253 s16 epp_cached;
254 u64 hwp_req_cached;
255 u64 hwp_cap_cached;
256 u64 last_io_update;
257 unsigned int sched_flags;
258 u32 hwp_boost_min;
259 bool suspended;
260};
261
262static struct cpudata **all_cpu_data;
263
264/**
265 * struct pstate_funcs - Per CPU model specific callbacks
266 * @get_max: Callback to get maximum non turbo effective P state
267 * @get_max_physical: Callback to get maximum non turbo physical P state
268 * @get_min: Callback to get minimum P state
269 * @get_turbo: Callback to get turbo P state
270 * @get_scaling: Callback to get frequency scaling factor
271 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
272 * @get_val: Callback to convert P state to actual MSR write value
273 * @get_vid: Callback to get VID data for Atom platforms
274 *
 * Core and Atom CPU models have different ways to get P-state limits. This
 * structure is used to store those callbacks.
277 */
278struct pstate_funcs {
279 int (*get_max)(void);
280 int (*get_max_physical)(void);
281 int (*get_min)(void);
282 int (*get_turbo)(void);
283 int (*get_scaling)(void);
284 int (*get_aperf_mperf_shift)(void);
285 u64 (*get_val)(struct cpudata*, int pstate);
286 void (*get_vid)(struct cpudata *);
287};
288
289static struct pstate_funcs pstate_funcs __read_mostly;
290
291static int hwp_active __read_mostly;
292static int hwp_mode_bdw __read_mostly;
293static bool per_cpu_limits __read_mostly;
294static bool hwp_boost __read_mostly;
295
296static struct cpufreq_driver *intel_pstate_driver __read_mostly;
297
298#ifdef CONFIG_ACPI
299static bool acpi_ppc;
300#endif
301
302static struct global_params global;
303
304static DEFINE_MUTEX(intel_pstate_driver_lock);
305static DEFINE_MUTEX(intel_pstate_limits_lock);
306
307#ifdef CONFIG_ACPI
308
309static bool intel_pstate_acpi_pm_profile_server(void)
310{
311 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
312 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
313 return true;
314
315 return false;
316}
317
318static bool intel_pstate_get_ppc_enable_status(void)
319{
320 if (intel_pstate_acpi_pm_profile_server())
321 return true;
322
323 return acpi_ppc;
324}
325
326#ifdef CONFIG_ACPI_CPPC_LIB
327
328/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
330{
331 sched_set_itmt_support();
332}
333
static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
335
336static void intel_pstate_set_itmt_prio(int cpu)
337{
338 struct cppc_perf_caps cppc_perf;
339 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
340 int ret;
341
342 ret = cppc_get_perf_caps(cpu, &cppc_perf);
343 if (ret)
344 return;
345
346 /*
347 * The priorities can be set regardless of whether or not
348 * sched_set_itmt_support(true) has been called and it is valid to
349 * update them at any time after it has been called.
350 */
351 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
352
353 if (max_highest_perf <= min_highest_perf) {
354 if (cppc_perf.highest_perf > max_highest_perf)
355 max_highest_perf = cppc_perf.highest_perf;
356
357 if (cppc_perf.highest_perf < min_highest_perf)
358 min_highest_perf = cppc_perf.highest_perf;
359
360 if (max_highest_perf > min_highest_perf) {
361 /*
362 * This code can be run during CPU online under the
363 * CPU hotplug locks, so sched_set_itmt_support()
364 * cannot be called from here. Queue up a work item
365 * to invoke it.
366 */
367 schedule_work(&sched_itmt_work);
368 }
369 }
370}
371
372static int intel_pstate_get_cppc_guaranteed(int cpu)
373{
374 struct cppc_perf_caps cppc_perf;
375 int ret;
376
377 ret = cppc_get_perf_caps(cpu, &cppc_perf);
378 if (ret)
379 return ret;
380
381 if (cppc_perf.guaranteed_perf)
382 return cppc_perf.guaranteed_perf;
383
384 return cppc_perf.nominal_perf;
385}
386
387#else /* CONFIG_ACPI_CPPC_LIB */
388static inline void intel_pstate_set_itmt_prio(int cpu)
389{
390}
391#endif /* CONFIG_ACPI_CPPC_LIB */
392
393static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
394{
395 struct cpudata *cpu;
396 int ret;
397 int i;
398
399 if (hwp_active) {
400 intel_pstate_set_itmt_prio(policy->cpu);
401 return;
402 }
403
404 if (!intel_pstate_get_ppc_enable_status())
405 return;
406
407 cpu = all_cpu_data[policy->cpu];
408
409 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
410 policy->cpu);
411 if (ret)
412 return;
413
414 /*
415 * Check if the control value in _PSS is for PERF_CTL MSR, which should
416 * guarantee that the states returned by it map to the states in our
417 * list directly.
418 */
419 if (cpu->acpi_perf_data.control_register.space_id !=
420 ACPI_ADR_SPACE_FIXED_HARDWARE)
421 goto err;
422
 /*
  * If _PSS contains only one entry, ignore it and continue as usual
  * without taking _PSS into account.
  */
427 if (cpu->acpi_perf_data.state_count < 2)
428 goto err;
429
430 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
431 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
432 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
433 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
434 (u32) cpu->acpi_perf_data.states[i].core_frequency,
435 (u32) cpu->acpi_perf_data.states[i].power,
436 (u32) cpu->acpi_perf_data.states[i].control);
437 }
438
 /*
  * The _PSS table doesn't contain the whole turbo frequency range: it
  * only lists +1 MHz above the max non-turbo frequency, with the
  * control value corresponding to the max turbo ratio. However, when
  * cpufreq "set policy" is called, it is called with this max
  * frequency, which would reduce performance, because this driver uses
  * the real max turbo frequency as the max frequency. So correct this
  * entry in the _PSS table to the real max turbo frequency based on
  * the turbo state. Also convert to MHz, as _PSS frequencies are in
  * MHz.
  */
450 if (!global.turbo_disabled)
451 cpu->acpi_perf_data.states[0].core_frequency =
452 policy->cpuinfo.max_freq / 1000;
453 cpu->valid_pss_table = true;
454 pr_debug("_PPC limits will be enforced\n");
455
456 return;
457
458 err:
459 cpu->valid_pss_table = false;
460 acpi_processor_unregister_performance(policy->cpu);
461}
462
463static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
464{
465 struct cpudata *cpu;
466
467 cpu = all_cpu_data[policy->cpu];
468 if (!cpu->valid_pss_table)
469 return;
470
471 acpi_processor_unregister_performance(policy->cpu);
472}
473
474static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
475{
476 return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
477}
478
479static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
480 struct cppc_perf_caps *caps)
481{
482 if (cppc_get_perf_caps(cpu->cpu, caps))
483 return false;
484
485 return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
486}
487#else /* CONFIG_ACPI */
488static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
489{
490}
491
492static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
493{
494}
495
496static inline bool intel_pstate_acpi_pm_profile_server(void)
497{
498 return false;
499}
500#endif /* CONFIG_ACPI */
501
502#ifndef CONFIG_ACPI_CPPC_LIB
503static inline int intel_pstate_get_cppc_guaranteed(int cpu)
504{
505 return -ENOTSUPP;
506}
507#endif /* CONFIG_ACPI_CPPC_LIB */
508
509static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
510{
511 pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
512
513 cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
514}
515
516/**
517 * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
518 * @cpu: Target CPU.
519 *
520 * On hybrid processors, HWP may expose more performance levels than there are
521 * P-states accessible through the PERF_CTL interface. If that happens, the
522 * scaling factor between HWP performance levels and CPU frequency will be less
523 * than the scaling factor between P-state values and CPU frequency.
524 *
 * In that case, the scaling factor between HWP performance levels and CPU
 * frequency needs to be determined, which can be done by observing that
 * certain HWP performance levels should correspond to certain P-states; for
 * example, the HWP highest performance should correspond to the maximum
 * turbo P-state of the CPU.
530 */
531static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
532{
533 int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
534 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
535 int perf_ctl_turbo = pstate_funcs.get_turbo();
536 int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
537 int perf_ctl_max = pstate_funcs.get_max();
538 int max_freq = perf_ctl_max * perf_ctl_scaling;
539 int scaling = INT_MAX;
540 int freq;
541
542 pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
543 pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
544 pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
545 pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
546
547 pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
548 pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
549
550#ifdef CONFIG_ACPI
551 if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
552 struct cppc_perf_caps caps;
553
554 if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
555 if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
556 pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
557
558 /*
559 * If the CPPC nominal performance is valid, it
560 * can be assumed to correspond to cpu_khz.
561 */
562 if (caps.nominal_perf == perf_ctl_max_phys) {
563 intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
564 return;
565 }
566 scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
567 } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
568 pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
569
570 /*
571 * If the CPPC guaranteed performance is valid,
572 * it can be assumed to correspond to max_freq.
573 */
574 if (caps.guaranteed_perf == perf_ctl_max) {
575 intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
576 return;
577 }
578 scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
579 }
580 }
581 }
582#endif
583 /*
584 * If using the CPPC data to compute the HWP-to-frequency scaling factor
 * doesn't work, use the HWP_CAP guaranteed perf for this purpose with
586 * the assumption that it corresponds to max_freq.
587 */
588 if (scaling > perf_ctl_scaling) {
589 pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);
590
591 if (cpu->pstate.max_pstate == perf_ctl_max) {
592 intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
593 return;
594 }
595 scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
596 if (scaling > perf_ctl_scaling) {
597 /*
598 * This should not happen, because it would mean that
599 * the number of HWP perf levels was less than the
600 * number of P-states, so use the PERF_CTL scaling in
601 * that case.
602 */
603 pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
604 scaling);
605
606 intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
607 return;
608 }
609 }
610
611 /*
612 * If the product of the HWP performance scaling factor obtained above
613 * and the HWP_CAP highest performance is greater than the maximum turbo
614 * frequency corresponding to the pstate_funcs.get_turbo() return value,
615 * the scaling factor is too high, so recompute it so that the HWP_CAP
616 * highest performance corresponds to the maximum turbo frequency.
617 */
618 if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
619 pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);
620
621 cpu->pstate.turbo_freq = turbo_freq;
622 scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
623 }
624
625 cpu->pstate.scaling = scaling;
626
627 pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
628
629 cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
630 perf_ctl_scaling);
631
632 freq = perf_ctl_max_phys * perf_ctl_scaling;
633 cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);
634
635 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
636 /*
637 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
638 * the effective range of HWP performance levels.
639 */
640 cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
641}
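
/*
 * For illustration, consider a hypothetical hybrid CPU (all numbers made
 * up): cpu_khz = 3000000 and perf_ctl_max_phys = 30 give perf_ctl_scaling
 * = 100000 kHz per P-state unit, and perf_ctl_max = 28 gives max_freq =
 * 2800000 kHz. If HWP_CAP guaranteed is 35 and no usable CPPC data is
 * found, the fallback above yields scaling = DIV_ROUND_UP(2800000, 35) =
 * 80000, which is below perf_ctl_scaling, so it is accepted. With HWP_CAP
 * highest = 40 and perf_ctl_turbo = 32, 40 * 80000 == 3200000 ==
 * turbo_freq, so no recomputation is needed. Finally, min_pstate = 8 maps
 * to min_freq = 800000 kHz and an effective HWP min_pstate of
 * DIV_ROUND_UP(800000, 80000) = 10.
 */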
642
643static inline void update_turbo_state(void)
644{
645 u64 misc_en;
646 struct cpudata *cpu;
647
648 cpu = all_cpu_data[0];
649 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
650 global.turbo_disabled =
651 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
652 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
653}
654
655static int min_perf_pct_min(void)
656{
657 struct cpudata *cpu = all_cpu_data[0];
658 int turbo_pstate = cpu->pstate.turbo_pstate;
659
660 return turbo_pstate ?
661 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
662}
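
/*
 * For example (hypothetical values), with min_pstate = 8 and turbo_pstate
 * = 40, min_perf_pct_min() returns 8 * 100 / 40 = 20, so the global
 * minimum performance limit cannot be pushed below 20%.
 */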
663
664static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
665{
666 u64 epb;
667 int ret;
668
669 if (!boot_cpu_has(X86_FEATURE_EPB))
670 return -ENXIO;
671
672 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
673 if (ret)
674 return (s16)ret;
675
676 return (s16)(epb & 0x0f);
677}
678
679static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
680{
681 s16 epp;
682
683 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 /*
  * When hwp_req_data is 0, it means the caller didn't read
  * MSR_HWP_REQUEST, so read it here to get the EPP.
  */
688 if (!hwp_req_data) {
689 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
690 &hwp_req_data);
691 if (epp)
692 return epp;
693 }
694 epp = (hwp_req_data >> 24) & 0xff;
695 } else {
696 /* When there is no EPP present, HWP uses EPB settings */
697 epp = intel_pstate_get_epb(cpu_data);
698 }
699
700 return epp;
701}
702
703static int intel_pstate_set_epb(int cpu, s16 pref)
704{
705 u64 epb;
706 int ret;
707
708 if (!boot_cpu_has(X86_FEATURE_EPB))
709 return -ENXIO;
710
711 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
712 if (ret)
713 return ret;
714
715 epb = (epb & ~0x0f) | pref;
716 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
717
718 return 0;
719}
720
721/*
722 * EPP/EPB display strings corresponding to EPP index in the
723 * energy_perf_strings[]
724 * index String
725 *-------------------------------------
726 * 0 default
727 * 1 performance
728 * 2 balance_performance
729 * 3 balance_power
730 * 4 power
731 */
732static const char * const energy_perf_strings[] = {
733 "default",
734 "performance",
735 "balance_performance",
736 "balance_power",
737 "power",
738 NULL
739};
740static const unsigned int epp_values[] = {
741 HWP_EPP_PERFORMANCE,
742 HWP_EPP_BALANCE_PERFORMANCE,
743 HWP_EPP_BALANCE_POWERSAVE,
744 HWP_EPP_POWERSAVE
745};
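
/*
 * Note that index i in energy_perf_strings[] (for i >= 1) maps to
 * epp_values[i - 1]: e.g. "balance_performance" (index 2) maps to
 * HWP_EPP_BALANCE_PERFORMANCE (0x80) and "power" (index 4) maps to
 * HWP_EPP_POWERSAVE (0xff), per the constants in asm/msr-index.h.
 */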
746
747static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
748{
749 s16 epp;
750 int index = -EINVAL;
751
752 *raw_epp = 0;
753 epp = intel_pstate_get_epp(cpu_data, 0);
754 if (epp < 0)
755 return epp;
756
757 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
758 if (epp == HWP_EPP_PERFORMANCE)
759 return 1;
760 if (epp == HWP_EPP_BALANCE_PERFORMANCE)
761 return 2;
762 if (epp == HWP_EPP_BALANCE_POWERSAVE)
763 return 3;
764 if (epp == HWP_EPP_POWERSAVE)
765 return 4;
766 *raw_epp = epp;
767 return 0;
768 } else if (boot_cpu_has(X86_FEATURE_EPB)) {
 /*
  * Range:
  * 0x00-0x03 : Performance
  * 0x04-0x07 : Balance performance
  * 0x08-0x0B : Balance power
  * 0x0C-0x0F : Power
  * The EPB is a 4-bit value, but these ranges restrict the
  * values that can be set, so effectively only the top two
  * bits are used here.
  */
779 index = (epp >> 2) + 1;
780 }
781
782 return index;
783}
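
/*
 * For example: on an EPB-only system, a hypothetical EPB value of 0x06
 * yields index (0x06 >> 2) + 1 = 2, i.e. "balance_performance". On an
 * HWP_EPP system, a raw EPP value such as 0x90 that matches none of the
 * named values is returned via *raw_epp with index 0 ("default").
 */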
784
785static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
786{
787 int ret;
788
789 /*
790 * Use the cached HWP Request MSR value, because in the active mode the
791 * register itself may be updated by intel_pstate_hwp_boost_up() or
792 * intel_pstate_hwp_boost_down() at any time.
793 */
794 u64 value = READ_ONCE(cpu->hwp_req_cached);
795
796 value &= ~GENMASK_ULL(31, 24);
797 value |= (u64)epp << 24;
798 /*
799 * The only other updater of hwp_req_cached in the active mode,
800 * intel_pstate_hwp_set(), is called under the same lock as this
801 * function, so it cannot run in parallel with the update below.
802 */
803 WRITE_ONCE(cpu->hwp_req_cached, value);
804 ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
805 if (!ret)
806 cpu->epp_cached = epp;
807
808 return ret;
809}
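
/*
 * The EPP occupies bits 31:24 of MSR_HWP_REQUEST. For example
 * (hypothetical value), if the cached request is 0xff001020 and epp =
 * 0x80 is set, the EPP byte is cleared and replaced, producing
 * 0x80001020 with the min/max performance fields left untouched.
 */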
810
811static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
812 int pref_index, bool use_raw,
813 u32 raw_epp)
814{
815 int epp = -EINVAL;
816 int ret;
817
818 if (!pref_index)
819 epp = cpu_data->epp_default;
820
821 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
822 if (use_raw)
823 epp = raw_epp;
824 else if (epp == -EINVAL)
825 epp = epp_values[pref_index - 1];
826
827 /*
828 * To avoid confusion, refuse to set EPP to any values different
829 * from 0 (performance) if the current policy is "performance",
830 * because those values would be overridden.
831 */
832 if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
833 return -EBUSY;
834
835 ret = intel_pstate_set_epp(cpu_data, epp);
836 } else {
837 if (epp == -EINVAL)
838 epp = (pref_index - 1) << 2;
839 ret = intel_pstate_set_epb(cpu_data->cpu, epp);
840 }
841
842 return ret;
843}
844
845static ssize_t show_energy_performance_available_preferences(
846 struct cpufreq_policy *policy, char *buf)
847{
848 int i = 0;
849 int ret = 0;
850
851 while (energy_perf_strings[i] != NULL)
852 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
853
854 ret += sprintf(&buf[ret], "\n");
855
856 return ret;
857}
858
859cpufreq_freq_attr_ro(energy_performance_available_preferences);
860
861static struct cpufreq_driver intel_pstate;
862
863static ssize_t store_energy_performance_preference(
864 struct cpufreq_policy *policy, const char *buf, size_t count)
865{
866 struct cpudata *cpu = all_cpu_data[policy->cpu];
867 char str_preference[21];
868 bool raw = false;
869 ssize_t ret;
870 u32 epp = 0;
871
872 ret = sscanf(buf, "%20s", str_preference);
873 if (ret != 1)
874 return -EINVAL;
875
876 ret = match_string(energy_perf_strings, -1, str_preference);
877 if (ret < 0) {
878 if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
879 return ret;
880
881 ret = kstrtouint(buf, 10, &epp);
882 if (ret)
883 return ret;
884
885 if (epp > 255)
886 return -EINVAL;
887
888 raw = true;
889 }
890
891 /*
892 * This function runs with the policy R/W semaphore held, which
893 * guarantees that the driver pointer will not change while it is
894 * running.
895 */
896 if (!intel_pstate_driver)
897 return -EAGAIN;
898
899 mutex_lock(&intel_pstate_limits_lock);
900
901 if (intel_pstate_driver == &intel_pstate) {
902 ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
903 } else {
904 /*
905 * In the passive mode the governor needs to be stopped on the
906 * target CPU before the EPP update and restarted after it,
907 * which is super-heavy-weight, so make sure it is worth doing
908 * upfront.
909 */
910 if (!raw)
911 epp = ret ? epp_values[ret - 1] : cpu->epp_default;
912
913 if (cpu->epp_cached != epp) {
914 int err;
915
916 cpufreq_stop_governor(policy);
917 ret = intel_pstate_set_epp(cpu, epp);
918 err = cpufreq_start_governor(policy);
919 if (!ret)
920 ret = err;
921 }
922 }
923
924 mutex_unlock(&intel_pstate_limits_lock);
925
926 return ret ?: count;
927}
928
929static ssize_t show_energy_performance_preference(
930 struct cpufreq_policy *policy, char *buf)
931{
932 struct cpudata *cpu_data = all_cpu_data[policy->cpu];
933 int preference, raw_epp;
934
935 preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
936 if (preference < 0)
937 return preference;
938
939 if (raw_epp)
940 return sprintf(buf, "%d\n", raw_epp);
941 else
942 return sprintf(buf, "%s\n", energy_perf_strings[preference]);
943}
944
945cpufreq_freq_attr_rw(energy_performance_preference);
946
947static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
948{
949 struct cpudata *cpu = all_cpu_data[policy->cpu];
950 int ratio, freq;
951
952 ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
953 if (ratio <= 0) {
954 u64 cap;
955
956 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
957 ratio = HWP_GUARANTEED_PERF(cap);
958 }
959
960 freq = ratio * cpu->pstate.scaling;
961 if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
962 freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
963
964 return sprintf(buf, "%d\n", freq);
965}
966
967cpufreq_freq_attr_ro(base_frequency);
968
969static struct freq_attr *hwp_cpufreq_attrs[] = {
970 &energy_performance_preference,
971 &energy_performance_available_preferences,
972 &base_frequency,
973 NULL,
974};
975
976static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
977{
978 u64 cap;
979
980 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
981 WRITE_ONCE(cpu->hwp_cap_cached, cap);
982 cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
983 cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
984}
985
986static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
987{
988 int scaling = cpu->pstate.scaling;
989
990 __intel_pstate_get_hwp_cap(cpu);
991
992 cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
993 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
994 if (scaling != cpu->pstate.perf_ctl_scaling) {
995 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
996
997 cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
998 perf_ctl_scaling);
999 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
1000 perf_ctl_scaling);
1001 }
1002}
1003
1004static void intel_pstate_hwp_set(unsigned int cpu)
1005{
1006 struct cpudata *cpu_data = all_cpu_data[cpu];
1007 int max, min;
1008 u64 value;
1009 s16 epp;
1010
1011 max = cpu_data->max_perf_ratio;
1012 min = cpu_data->min_perf_ratio;
1013
1014 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
1015 min = max;
1016
1017 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
1018
1019 value &= ~HWP_MIN_PERF(~0L);
1020 value |= HWP_MIN_PERF(min);
1021
1022 value &= ~HWP_MAX_PERF(~0L);
1023 value |= HWP_MAX_PERF(max);
1024
1025 if (cpu_data->epp_policy == cpu_data->policy)
1026 goto skip_epp;
1027
1028 cpu_data->epp_policy = cpu_data->policy;
1029
1030 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
1031 epp = intel_pstate_get_epp(cpu_data, value);
1032 cpu_data->epp_powersave = epp;
 /* If the EPP read failed, don't try to write. */
1034 if (epp < 0)
1035 goto skip_epp;
1036
1037 epp = 0;
1038 } else {
 /* Skip setting the EPP when the saved value is invalid. */
1040 if (cpu_data->epp_powersave < 0)
1041 goto skip_epp;
1042
 /*
  * There is no need to restore the EPP when it is not zero,
  * which means one of the following:
  * - the policy has not changed,
  * - the user has changed it manually,
  * - reading the EPB failed.
  */
1050 epp = intel_pstate_get_epp(cpu_data, value);
1051 if (epp)
1052 goto skip_epp;
1053
1054 epp = cpu_data->epp_powersave;
1055 }
1056 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
1057 value &= ~GENMASK_ULL(31, 24);
1058 value |= (u64)epp << 24;
1059 } else {
1060 intel_pstate_set_epb(cpu, epp);
1061 }
1062skip_epp:
1063 WRITE_ONCE(cpu_data->hwp_req_cached, value);
1064 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
1065}
1066
1067static void intel_pstate_hwp_offline(struct cpudata *cpu)
1068{
1069 u64 value = READ_ONCE(cpu->hwp_req_cached);
1070 int min_perf;
1071
1072 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
1073 /*
1074 * In case the EPP has been set to "performance" by the
1075 * active mode "performance" scaling algorithm, replace that
1076 * temporary value with the cached EPP one.
1077 */
1078 value &= ~GENMASK_ULL(31, 24);
1079 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
1080 WRITE_ONCE(cpu->hwp_req_cached, value);
1081 }
1082
1083 value &= ~GENMASK_ULL(31, 0);
1084 min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
1085
1086 /* Set hwp_max = hwp_min */
1087 value |= HWP_MAX_PERF(min_perf);
1088 value |= HWP_MIN_PERF(min_perf);
1089
1090 /* Set EPP to min */
1091 if (boot_cpu_has(X86_FEATURE_HWP_EPP))
1092 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
1093
1094 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
1095}
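
/*
 * For example, if HWP_CAP reports a lowest performance of 4 on an
 * HWP_EPP-capable CPU, the request written here becomes 0xff000404:
 * min = max = 4, desired = 0 and EPP = 0xff (powersave), i.e. the
 * lowest-power configuration for the offline CPU.
 */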
1096
1097#define POWER_CTL_EE_ENABLE 1
1098#define POWER_CTL_EE_DISABLE 2
1099
1100static int power_ctl_ee_state;
1101
1102static void set_power_ctl_ee_state(bool input)
1103{
1104 u64 power_ctl;
1105
1106 mutex_lock(&intel_pstate_driver_lock);
1107 rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1108 if (input) {
1109 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
1110 power_ctl_ee_state = POWER_CTL_EE_ENABLE;
1111 } else {
1112 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
1113 power_ctl_ee_state = POWER_CTL_EE_DISABLE;
1114 }
1115 wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
1116 mutex_unlock(&intel_pstate_driver_lock);
1117}
1118
1119static void intel_pstate_hwp_enable(struct cpudata *cpudata);
1120
1121static void intel_pstate_hwp_reenable(struct cpudata *cpu)
1122{
1123 intel_pstate_hwp_enable(cpu);
1124 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
1125}
1126
1127static int intel_pstate_suspend(struct cpufreq_policy *policy)
1128{
1129 struct cpudata *cpu = all_cpu_data[policy->cpu];
1130
1131 pr_debug("CPU %d suspending\n", cpu->cpu);
1132
1133 cpu->suspended = true;
1134
1135 return 0;
1136}
1137
1138static int intel_pstate_resume(struct cpufreq_policy *policy)
1139{
1140 struct cpudata *cpu = all_cpu_data[policy->cpu];
1141
1142 pr_debug("CPU %d resuming\n", cpu->cpu);
1143
 /* Only restore if the system default has been changed. */
1145 if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
1146 set_power_ctl_ee_state(true);
1147 else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
1148 set_power_ctl_ee_state(false);
1149
1150 if (cpu->suspended && hwp_active) {
1151 mutex_lock(&intel_pstate_limits_lock);
1152
1153 /* Re-enable HWP, because "online" has not done that. */
1154 intel_pstate_hwp_reenable(cpu);
1155
1156 mutex_unlock(&intel_pstate_limits_lock);
1157 }
1158
1159 cpu->suspended = false;
1160
1161 return 0;
1162}
1163
1164static void intel_pstate_update_policies(void)
1165{
1166 int cpu;
1167
1168 for_each_possible_cpu(cpu)
1169 cpufreq_update_policy(cpu);
1170}
1171
1172static void intel_pstate_update_max_freq(unsigned int cpu)
1173{
1174 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
1175 struct cpudata *cpudata;
1176
1177 if (!policy)
1178 return;
1179
1180 cpudata = all_cpu_data[cpu];
1181 policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
1182 cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
1183
1184 refresh_frequency_limits(policy);
1185
1186 cpufreq_cpu_release(policy);
1187}
1188
1189static void intel_pstate_update_limits(unsigned int cpu)
1190{
1191 mutex_lock(&intel_pstate_driver_lock);
1192
1193 update_turbo_state();
1194 /*
1195 * If turbo has been turned on or off globally, policy limits for
1196 * all CPUs need to be updated to reflect that.
1197 */
1198 if (global.turbo_disabled_mf != global.turbo_disabled) {
1199 global.turbo_disabled_mf = global.turbo_disabled;
1200 arch_set_max_freq_ratio(global.turbo_disabled);
1201 for_each_possible_cpu(cpu)
1202 intel_pstate_update_max_freq(cpu);
1203 } else {
1204 cpufreq_update_policy(cpu);
1205 }
1206
1207 mutex_unlock(&intel_pstate_driver_lock);
1208}
1209
1210/************************** sysfs begin ************************/
1211#define show_one(file_name, object) \
1212 static ssize_t show_##file_name \
1213 (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
1214 { \
1215 return sprintf(buf, "%u\n", global.object); \
1216 }
1217
1218static ssize_t intel_pstate_show_status(char *buf);
1219static int intel_pstate_update_status(const char *buf, size_t size);
1220
1221static ssize_t show_status(struct kobject *kobj,
1222 struct kobj_attribute *attr, char *buf)
1223{
1224 ssize_t ret;
1225
1226 mutex_lock(&intel_pstate_driver_lock);
1227 ret = intel_pstate_show_status(buf);
1228 mutex_unlock(&intel_pstate_driver_lock);
1229
1230 return ret;
1231}
1232
1233static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
1234 const char *buf, size_t count)
1235{
1236 char *p = memchr(buf, '\n', count);
1237 int ret;
1238
1239 mutex_lock(&intel_pstate_driver_lock);
1240 ret = intel_pstate_update_status(buf, p ? p - buf : count);
1241 mutex_unlock(&intel_pstate_driver_lock);
1242
1243 return ret < 0 ? ret : count;
1244}
1245
1246static ssize_t show_turbo_pct(struct kobject *kobj,
1247 struct kobj_attribute *attr, char *buf)
1248{
1249 struct cpudata *cpu;
1250 int total, no_turbo, turbo_pct;
1251 uint32_t turbo_fp;
1252
1253 mutex_lock(&intel_pstate_driver_lock);
1254
1255 if (!intel_pstate_driver) {
1256 mutex_unlock(&intel_pstate_driver_lock);
1257 return -EAGAIN;
1258 }
1259
1260 cpu = all_cpu_data[0];
1261
1262 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1263 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
1264 turbo_fp = div_fp(no_turbo, total);
1265 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
1266
1267 mutex_unlock(&intel_pstate_driver_lock);
1268
1269 return sprintf(buf, "%u\n", turbo_pct);
1270}
1271
1272static ssize_t show_num_pstates(struct kobject *kobj,
1273 struct kobj_attribute *attr, char *buf)
1274{
1275 struct cpudata *cpu;
1276 int total;
1277
1278 mutex_lock(&intel_pstate_driver_lock);
1279
1280 if (!intel_pstate_driver) {
1281 mutex_unlock(&intel_pstate_driver_lock);
1282 return -EAGAIN;
1283 }
1284
1285 cpu = all_cpu_data[0];
1286 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1287
1288 mutex_unlock(&intel_pstate_driver_lock);
1289
1290 return sprintf(buf, "%u\n", total);
1291}
1292
1293static ssize_t show_no_turbo(struct kobject *kobj,
1294 struct kobj_attribute *attr, char *buf)
1295{
1296 ssize_t ret;
1297
1298 mutex_lock(&intel_pstate_driver_lock);
1299
1300 if (!intel_pstate_driver) {
1301 mutex_unlock(&intel_pstate_driver_lock);
1302 return -EAGAIN;
1303 }
1304
1305 update_turbo_state();
1306 if (global.turbo_disabled)
1307 ret = sprintf(buf, "%u\n", global.turbo_disabled);
1308 else
1309 ret = sprintf(buf, "%u\n", global.no_turbo);
1310
1311 mutex_unlock(&intel_pstate_driver_lock);
1312
1313 return ret;
1314}
1315
1316static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1317 const char *buf, size_t count)
1318{
1319 unsigned int input;
1320 int ret;
1321
1322 ret = sscanf(buf, "%u", &input);
1323 if (ret != 1)
1324 return -EINVAL;
1325
1326 mutex_lock(&intel_pstate_driver_lock);
1327
1328 if (!intel_pstate_driver) {
1329 mutex_unlock(&intel_pstate_driver_lock);
1330 return -EAGAIN;
1331 }
1332
1333 mutex_lock(&intel_pstate_limits_lock);
1334
1335 update_turbo_state();
1336 if (global.turbo_disabled) {
1337 pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
1338 mutex_unlock(&intel_pstate_limits_lock);
1339 mutex_unlock(&intel_pstate_driver_lock);
1340 return -EPERM;
1341 }
1342
1343 global.no_turbo = clamp_t(int, input, 0, 1);
1344
1345 if (global.no_turbo) {
1346 struct cpudata *cpu = all_cpu_data[0];
1347 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
1348
1349 /* Squash the global minimum into the permitted range. */
1350 if (global.min_perf_pct > pct)
1351 global.min_perf_pct = pct;
1352 }
1353
1354 mutex_unlock(&intel_pstate_limits_lock);
1355
1356 intel_pstate_update_policies();
1357
1358 mutex_unlock(&intel_pstate_driver_lock);
1359
1360 return count;
1361}
1362
1363static void update_qos_request(enum freq_qos_req_type type)
1364{
1365 struct freq_qos_request *req;
1366 struct cpufreq_policy *policy;
1367 int i;
1368
1369 for_each_possible_cpu(i) {
1370 struct cpudata *cpu = all_cpu_data[i];
1371 unsigned int freq, perf_pct;
1372
1373 policy = cpufreq_cpu_get(i);
1374 if (!policy)
1375 continue;
1376
1377 req = policy->driver_data;
1378 cpufreq_cpu_put(policy);
1379
1380 if (!req)
1381 continue;
1382
1383 if (hwp_active)
1384 intel_pstate_get_hwp_cap(cpu);
1385
1386 if (type == FREQ_QOS_MIN) {
1387 perf_pct = global.min_perf_pct;
1388 } else {
1389 req++;
1390 perf_pct = global.max_perf_pct;
1391 }
1392
1393 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
1394
1395 if (freq_qos_update_request(req, freq) < 0)
1396 pr_warn("Failed to update freq constraint: CPU%d\n", i);
1397 }
1398}
1399
1400static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1401 const char *buf, size_t count)
1402{
1403 unsigned int input;
1404 int ret;
1405
1406 ret = sscanf(buf, "%u", &input);
1407 if (ret != 1)
1408 return -EINVAL;
1409
1410 mutex_lock(&intel_pstate_driver_lock);
1411
1412 if (!intel_pstate_driver) {
1413 mutex_unlock(&intel_pstate_driver_lock);
1414 return -EAGAIN;
1415 }
1416
1417 mutex_lock(&intel_pstate_limits_lock);
1418
1419 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
1420
1421 mutex_unlock(&intel_pstate_limits_lock);
1422
1423 if (intel_pstate_driver == &intel_pstate)
1424 intel_pstate_update_policies();
1425 else
1426 update_qos_request(FREQ_QOS_MAX);
1427
1428 mutex_unlock(&intel_pstate_driver_lock);
1429
1430 return count;
1431}
1432
1433static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1434 const char *buf, size_t count)
1435{
1436 unsigned int input;
1437 int ret;
1438
1439 ret = sscanf(buf, "%u", &input);
1440 if (ret != 1)
1441 return -EINVAL;
1442
1443 mutex_lock(&intel_pstate_driver_lock);
1444
1445 if (!intel_pstate_driver) {
1446 mutex_unlock(&intel_pstate_driver_lock);
1447 return -EAGAIN;
1448 }
1449
1450 mutex_lock(&intel_pstate_limits_lock);
1451
1452 global.min_perf_pct = clamp_t(int, input,
1453 min_perf_pct_min(), global.max_perf_pct);
1454
1455 mutex_unlock(&intel_pstate_limits_lock);
1456
1457 if (intel_pstate_driver == &intel_pstate)
1458 intel_pstate_update_policies();
1459 else
1460 update_qos_request(FREQ_QOS_MIN);
1461
1462 mutex_unlock(&intel_pstate_driver_lock);
1463
1464 return count;
1465}
1466
1467static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
1468 struct kobj_attribute *attr, char *buf)
1469{
1470 return sprintf(buf, "%u\n", hwp_boost);
1471}
1472
1473static ssize_t store_hwp_dynamic_boost(struct kobject *a,
1474 struct kobj_attribute *b,
1475 const char *buf, size_t count)
1476{
1477 unsigned int input;
1478 int ret;
1479
1480 ret = kstrtouint(buf, 10, &input);
1481 if (ret)
1482 return ret;
1483
1484 mutex_lock(&intel_pstate_driver_lock);
1485 hwp_boost = !!input;
1486 intel_pstate_update_policies();
1487 mutex_unlock(&intel_pstate_driver_lock);
1488
1489 return count;
1490}
1491
1492static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
1493 char *buf)
1494{
1495 u64 power_ctl;
1496 int enable;
1497
1498 rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1499 enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
1500 return sprintf(buf, "%d\n", !enable);
1501}
1502
1503static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
1504 const char *buf, size_t count)
1505{
1506 bool input;
1507 int ret;
1508
1509 ret = kstrtobool(buf, &input);
1510 if (ret)
1511 return ret;
1512
1513 set_power_ctl_ee_state(input);
1514
1515 return count;
1516}
1517
1518show_one(max_perf_pct, max_perf_pct);
1519show_one(min_perf_pct, min_perf_pct);
1520
1521define_one_global_rw(status);
1522define_one_global_rw(no_turbo);
1523define_one_global_rw(max_perf_pct);
1524define_one_global_rw(min_perf_pct);
1525define_one_global_ro(turbo_pct);
1526define_one_global_ro(num_pstates);
1527define_one_global_rw(hwp_dynamic_boost);
1528define_one_global_rw(energy_efficiency);
1529
1530static struct attribute *intel_pstate_attributes[] = {
1531 &status.attr,
1532 &no_turbo.attr,
1533 NULL
1534};
1535
1536static const struct attribute_group intel_pstate_attr_group = {
1537 .attrs = intel_pstate_attributes,
1538};
1539
1540static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
1541
1542static struct kobject *intel_pstate_kobject;
1543
1544static void __init intel_pstate_sysfs_expose_params(void)
1545{
1546 int rc;
1547
1548 intel_pstate_kobject = kobject_create_and_add("intel_pstate",
1549 &cpu_subsys.dev_root->kobj);
1550 if (WARN_ON(!intel_pstate_kobject))
1551 return;
1552
1553 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1554 if (WARN_ON(rc))
1555 return;
1556
1557 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1558 rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
1559 WARN_ON(rc);
1560
1561 rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
1562 WARN_ON(rc);
1563 }
1564
1565 /*
1566 * If per cpu limits are enforced there are no global limits, so
1567 * return without creating max/min_perf_pct attributes
1568 */
1569 if (per_cpu_limits)
1570 return;
1571
1572 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
1573 WARN_ON(rc);
1574
1575 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
1576 WARN_ON(rc);
1577
1578 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
1579 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
1580 WARN_ON(rc);
1581 }
1582}
1583
1584static void __init intel_pstate_sysfs_remove(void)
1585{
1586 if (!intel_pstate_kobject)
1587 return;
1588
1589 sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
1590
1591 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1592 sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
1593 sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
1594 }
1595
1596 if (!per_cpu_limits) {
1597 sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
1598 sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
1599
1600 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
1601 sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
1602 }
1603
1604 kobject_put(intel_pstate_kobject);
1605}
1606
1607static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
1608{
1609 int rc;
1610
1611 if (!hwp_active)
1612 return;
1613
1614 rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1615 WARN_ON_ONCE(rc);
1616}
1617
1618static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
1619{
1620 if (!hwp_active)
1621 return;
1622
1623 sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1624}
1625
1626/************************** sysfs end ************************/
1627
1628static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1629{
1630 /* First disable HWP notification interrupt as we don't process them */
1631 if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1632 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1633
1634 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1635 if (cpudata->epp_default == -EINVAL)
1636 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1637}
1638
1639static int atom_get_min_pstate(void)
1640{
1641 u64 value;
1642
1643 rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1644 return (value >> 8) & 0x7F;
1645}
1646
1647static int atom_get_max_pstate(void)
1648{
1649 u64 value;
1650
1651 rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1652 return (value >> 16) & 0x7F;
1653}
1654
1655static int atom_get_turbo_pstate(void)
1656{
1657 u64 value;
1658
1659 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
1660 return value & 0x7F;
1661}
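
/*
 * For illustration, with a hypothetical MSR_ATOM_CORE_RATIOS value of
 * 0x00170c00: the min ratio is (value >> 8) & 0x7f = 0x0c (12) and the
 * max ratio is (value >> 16) & 0x7f = 0x17 (23). The turbo ratio comes
 * from bits 6:0 of MSR_ATOM_CORE_TURBO_RATIOS, e.g. 0x1a (26).
 */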
1662
1663static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1664{
1665 u64 val;
1666 int32_t vid_fp;
1667 u32 vid;
1668
1669 val = (u64)pstate << 8;
1670 if (global.no_turbo && !global.turbo_disabled)
1671 val |= (u64)1 << 32;
1672
1673 vid_fp = cpudata->vid.min + mul_fp(
1674 int_tofp(pstate - cpudata->pstate.min_pstate),
1675 cpudata->vid.ratio);
1676
1677 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
1678 vid = ceiling_fp(vid_fp);
1679
1680 if (pstate > cpudata->pstate.max_pstate)
1681 vid = cpudata->vid.turbo;
1682
1683 return val | vid;
1684}
1685
1686static int silvermont_get_scaling(void)
1687{
1688 u64 value;
1689 int i;
1690 /* Defined in Table 35-6 from SDM (Sept 2015) */
1691 static int silvermont_freq_table[] = {
1692 83300, 100000, 133300, 116700, 80000};
1693
1694 rdmsrl(MSR_FSB_FREQ, value);
1695 i = value & 0x7;
1696 WARN_ON(i > 4);
1697
1698 return silvermont_freq_table[i];
1699}
1700
1701static int airmont_get_scaling(void)
1702{
1703 u64 value;
1704 int i;
1705 /* Defined in Table 35-10 from SDM (Sept 2015) */
1706 static int airmont_freq_table[] = {
1707 83300, 100000, 133300, 116700, 80000,
1708 93300, 90000, 88900, 87500};
1709
1710 rdmsrl(MSR_FSB_FREQ, value);
1711 i = value & 0xF;
1712 WARN_ON(i > 8);
1713
1714 return airmont_freq_table[i];
1715}
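
/*
 * For example, if MSR_FSB_FREQ & 0xf is 0, the bus frequency is
 * 83300 kHz per ratio unit, so a hypothetical P-state ratio of 20
 * corresponds to 20 * 83300 = 1666000 kHz, i.e. roughly 1.67 GHz.
 */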
1716
1717static void atom_get_vid(struct cpudata *cpudata)
1718{
1719 u64 value;
1720
1721 rdmsrl(MSR_ATOM_CORE_VIDS, value);
1722 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
1723 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
1724 cpudata->vid.ratio = div_fp(
1725 cpudata->vid.max - cpudata->vid.min,
1726 int_tofp(cpudata->pstate.max_pstate -
1727 cpudata->pstate.min_pstate));
1728
1729 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
1730 cpudata->vid.turbo = value & 0x7f;
1731}
1732
1733static int core_get_min_pstate(void)
1734{
1735 u64 value;
1736
1737 rdmsrl(MSR_PLATFORM_INFO, value);
1738 return (value >> 40) & 0xFF;
1739}
1740
1741static int core_get_max_pstate_physical(void)
1742{
1743 u64 value;
1744
1745 rdmsrl(MSR_PLATFORM_INFO, value);
1746 return (value >> 8) & 0xFF;
1747}
1748
1749static int core_get_tdp_ratio(u64 plat_info)
1750{
 /* Check how many TDP levels are present. */
1752 if (plat_info & 0x600000000) {
1753 u64 tdp_ctrl;
1754 u64 tdp_ratio;
1755 int tdp_msr;
1756 int err;
1757
1758 /* Get the TDP level (0, 1, 2) to get ratios */
1759 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
1760 if (err)
1761 return err;
1762
 /* The TDP MSRs are contiguous, starting at 0x648. */
1764 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
1765 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
1766 if (err)
1767 return err;
1768
1769 /* For level 1 and 2, bits[23:16] contain the ratio */
1770 if (tdp_ctrl & 0x03)
1771 tdp_ratio >>= 16;
1772
1773 tdp_ratio &= 0xff; /* ratios are only 8 bits long */
1774 pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
1775
1776 return (int)tdp_ratio;
1777 }
1778
1779 return -ENXIO;
1780}
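
/*
 * For example (hypothetical register values): if plat_info has one of
 * bits 34:33 set and MSR_CONFIG_TDP_CONTROL reads 1, the ratio is taken
 * from MSR 0x649 (MSR_CONFIG_TDP_NOMINAL + 1); for a value such as
 * 0x00180050, bits 23:16 give a TDP ratio of 0x18 (24).
 */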
1781
1782static int core_get_max_pstate(void)
1783{
1784 u64 tar;
1785 u64 plat_info;
1786 int max_pstate;
1787 int tdp_ratio;
1788 int err;
1789
1790 rdmsrl(MSR_PLATFORM_INFO, plat_info);
1791 max_pstate = (plat_info >> 8) & 0xFF;
1792
1793 tdp_ratio = core_get_tdp_ratio(plat_info);
1794 if (tdp_ratio <= 0)
1795 return max_pstate;
1796
1797 if (hwp_active) {
1798 /* Turbo activation ratio is not used on HWP platforms */
1799 return tdp_ratio;
1800 }
1801
1802 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
1803 if (!err) {
1804 int tar_levels;
1805
1806 /* Do some sanity checking for safety */
1807 tar_levels = tar & 0xff;
1808 if (tdp_ratio - 1 == tar_levels) {
1809 max_pstate = tar_levels;
1810 pr_debug("max_pstate=TAC %x\n", max_pstate);
1811 }
1812 }
1813
1814 return max_pstate;
1815}
1816
1817static int core_get_turbo_pstate(void)
1818{
1819 u64 value;
1820 int nont, ret;
1821
1822 rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1823 nont = core_get_max_pstate();
1824 ret = (value) & 255;
1825 if (ret <= nont)
1826 ret = nont;
1827 return ret;
1828}
1829
1830static inline int core_get_scaling(void)
1831{
1832 return 100000;
1833}
1834
1835static u64 core_get_val(struct cpudata *cpudata, int pstate)
1836{
1837 u64 val;
1838
1839 val = (u64)pstate << 8;
1840 if (global.no_turbo && !global.turbo_disabled)
1841 val |= (u64)1 << 32;
1842
1843 return val;
1844}
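
/*
 * The P-state ratio occupies bits 15:8 of the PERF_CTL value and bit 32
 * disables turbo for the request. For example, requesting a hypothetical
 * P-state of 0x1c with turbo disabled via sysfs (but not by the BIOS)
 * yields the value 0x100001c00.
 */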
1845
1846static int knl_get_aperf_mperf_shift(void)
1847{
1848 return 10;
1849}
1850
1851static int knl_get_turbo_pstate(void)
1852{
1853 u64 value;
1854 int nont, ret;
1855
1856 rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1857 nont = core_get_max_pstate();
1858 ret = (((value) >> 8) & 0xFF);
1859 if (ret <= nont)
1860 ret = nont;
1861 return ret;
1862}
1863
1864static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1865{
1866 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1867 cpu->pstate.current_pstate = pstate;
1868 /*
1869 * Generally, there is no guarantee that this code will always run on
1870 * the CPU being updated, so force the register update to run on the
1871 * right CPU.
1872 */
1873 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
1874 pstate_funcs.get_val(cpu, pstate));
1875}
1876
1877static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1878{
1879 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
1880}
1881
1882static void intel_pstate_max_within_limits(struct cpudata *cpu)
1883{
1884 int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1885
1886 update_turbo_state();
1887 intel_pstate_set_pstate(cpu, pstate);
1888}
1889
1890static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1891{
1892 bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
1893 int perf_ctl_max_phys = pstate_funcs.get_max_physical();
1894 int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
1895 pstate_funcs.get_scaling();
1896
1897 cpu->pstate.min_pstate = pstate_funcs.get_min();
1898 cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
1899 cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
1900
1901 if (hwp_active && !hwp_mode_bdw) {
1902 __intel_pstate_get_hwp_cap(cpu);
1903
1904 if (hybrid_cpu)
1905 intel_pstate_hybrid_hwp_calibrate(cpu);
1906 else
1907 cpu->pstate.scaling = perf_ctl_scaling;
1908 } else {
1909 cpu->pstate.scaling = perf_ctl_scaling;
1910 cpu->pstate.max_pstate = pstate_funcs.get_max();
1911 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1912 }
1913
1914 if (cpu->pstate.scaling == perf_ctl_scaling) {
1915 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
1916 cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
1917 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
1918 }
1919
1920 if (pstate_funcs.get_aperf_mperf_shift)
1921 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1922
1923 if (pstate_funcs.get_vid)
1924 pstate_funcs.get_vid(cpu);
1925
1926 intel_pstate_set_min_pstate(cpu);
1927}
1928
/*
 * A long hold time keeps the high performance limits in place for a
 * long time, which negatively impacts perf/watt for some workloads,
 * like specpower. The 3 ms value is based on experiments with some
 * workloads.
 */
1935static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
1936
1937static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
1938{
1939 u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
1940 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
1941 u32 max_limit = (hwp_req & 0xff00) >> 8;
1942 u32 min_limit = (hwp_req & 0xff);
1943 u32 boost_level1;
1944
1945 /*
1946 * Cases to consider (User changes via sysfs or boot time):
1947 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
1948 * No boost, return.
1949 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
1950 * Should result in one level boost only for P0.
1951 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
1952 * Should result in two level boost:
1953 * (min + p1)/2 and P1.
1954 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
1955 * Should result in three level boost:
1956 * (min + p1)/2, P1 and P0.
1957 */
1958
1959 /* If max and min are equal or already at max, nothing to boost */
1960 if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
1961 return;
1962
1963 if (!cpu->hwp_boost_min)
1964 cpu->hwp_boost_min = min_limit;
1965
 /* Level at the halfway mark between min and guaranteed. */
1967 boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
1968
1969 if (cpu->hwp_boost_min < boost_level1)
1970 cpu->hwp_boost_min = boost_level1;
1971 else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
1972 cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
1973 else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
1974 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
1975 cpu->hwp_boost_min = max_limit;
1976 else
1977 return;
1978
1979 hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
1980 wrmsrl(MSR_HWP_REQUEST, hwp_req);
1981 cpu->last_update = cpu->sample.time;
1982}
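
/*
 * For example (hypothetical limits): with min_limit = 8, HWP_CAP
 * guaranteed = 24 and max_limit = 32, successive IO wakeups move
 * hwp_boost_min from 8 to boost_level1 = (24 + 8) / 2 = 16, then to
 * 24 (guaranteed) and finally to 32 (max), one step per wakeup.
 */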
1983
1984static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
1985{
1986 if (cpu->hwp_boost_min) {
1987 bool expired;
1988
1989 /* Check if we are idle for hold time to boost down */
1990 expired = time_after64(cpu->sample.time, cpu->last_update +
1991 hwp_boost_hold_time_ns);
1992 if (expired) {
1993 wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
1994 cpu->hwp_boost_min = 0;
1995 }
1996 }
1997 cpu->last_update = cpu->sample.time;
1998}
1999
2000static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
2001 u64 time)
2002{
2003 cpu->sample.time = time;
2004
2005 if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
2006 bool do_io = false;
2007
2008 cpu->sched_flags = 0;
 /*
  * Set the iowait_boost flag and update the time. Since the IO
  * WAIT flag is set all the time, we can't conclude from just
  * one occurrence that IO-bound activity is scheduled on this
  * CPU. Only if we receive at least two of them within two
  * consecutive ticks do we treat the CPU as a boost candidate.
  */
2016 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
2017 do_io = true;
2018
2019 cpu->last_io_update = time;
2020
2021 if (do_io)
2022 intel_pstate_hwp_boost_up(cpu);
2023
2024 } else {
2025 intel_pstate_hwp_boost_down(cpu);
2026 }
2027}
2028
2029static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
2030 u64 time, unsigned int flags)
2031{
2032 struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2033
2034 cpu->sched_flags |= flags;
2035
2036 if (smp_processor_id() == cpu->cpu)
2037 intel_pstate_update_util_hwp_local(cpu, time);
2038}
2039
2040static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
2041{
2042 struct sample *sample = &cpu->sample;
2043
2044 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
2045}
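
/*
 * core_avg_perf is stored in EXT_FRAC_BITS (14-bit) fixed point. For
 * example, hypothetical deltas of aperf = 3000000 and mperf = 2000000
 * give (3000000 << 14) / 2000000 = 24576, i.e. 1.5, and with cpu_khz =
 * 2000000 get_avg_frequency() returns (24576 * 2000000) >> 14 =
 * 3000000 kHz.
 */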
2046
2047static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
2048{
2049 u64 aperf, mperf;
2050 unsigned long flags;
2051 u64 tsc;
2052
2053 local_irq_save(flags);
2054 rdmsrl(MSR_IA32_APERF, aperf);
2055 rdmsrl(MSR_IA32_MPERF, mperf);
2056 tsc = rdtsc();
2057 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
2058 local_irq_restore(flags);
2059 return false;
2060 }
2061 local_irq_restore(flags);
2062
2063 cpu->last_sample_time = cpu->sample.time;
2064 cpu->sample.time = time;
2065 cpu->sample.aperf = aperf;
2066 cpu->sample.mperf = mperf;
2067 cpu->sample.tsc = tsc;
2068 cpu->sample.aperf -= cpu->prev_aperf;
2069 cpu->sample.mperf -= cpu->prev_mperf;
2070 cpu->sample.tsc -= cpu->prev_tsc;
2071
2072 cpu->prev_aperf = aperf;
2073 cpu->prev_mperf = mperf;
2074 cpu->prev_tsc = tsc;
2075 /*
2076 * First time this function is invoked in a given cycle, all of the
2077 * previous sample data fields are equal to zero or stale and they must
2078 * be populated with meaningful numbers for things to work, so assume
2079 * that sample.time will always be reset before setting the utilization
2080 * update hook and make the caller skip the sample then.
2081 */
2082 if (cpu->last_sample_time) {
2083 intel_pstate_calc_avg_perf(cpu);
2084 return true;
2085 }
2086 return false;
2087}
2088
2089static inline int32_t get_avg_frequency(struct cpudata *cpu)
2090{
2091 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
2092}
2093
2094static inline int32_t get_avg_pstate(struct cpudata *cpu)
2095{
2096 return mul_ext_fp(cpu->pstate.max_pstate_physical,
2097 cpu->sample.core_avg_perf);
2098}
2099
2100static inline int32_t get_target_pstate(struct cpudata *cpu)
2101{
2102 struct sample *sample = &cpu->sample;
2103 int32_t busy_frac;
2104 int target, avg_pstate;
2105
2106 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
2107 sample->tsc);
2108
2109 if (busy_frac < cpu->iowait_boost)
2110 busy_frac = cpu->iowait_boost;
2111
2112 sample->busy_scaled = busy_frac * 100;
2113
2114 target = global.no_turbo || global.turbo_disabled ?
2115 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2116 target += target >> 2;
2117 target = mul_fp(target, busy_frac);
2118 if (target < cpu->pstate.min_pstate)
2119 target = cpu->pstate.min_pstate;
2120
2121 /*
2122 * If the average P-state during the previous cycle was higher than the
2123 * current target, add 50% of the difference to the target to reduce
2124 * possible performance oscillations and offset possible performance
2125 * loss related to moving the workload from one CPU to another within
2126 * a package/module.
2127 */
2128 avg_pstate = get_avg_pstate(cpu);
2129 if (avg_pstate > target)
2130 target += (avg_pstate - target) >> 1;
2131
2132 return target;
2133}
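
/*
 * Worked example with hypothetical numbers: FRAC_BITS = 8, so
 * int_tofp(1) = 256. If mperf/tsc shows the CPU 50% busy, busy_frac =
 * 128. With turbo available and turbo_pstate = 32, target = 32 + 8 =
 * 40, and mul_fp(40, 128) = 20. If the previous average P-state was
 * 24, the final target becomes 20 + (24 - 20) / 2 = 22.
 */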
2134
2135static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
2136{
2137 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
2138 int max_pstate = max(min_pstate, cpu->max_perf_ratio);
2139
2140 return clamp_t(int, pstate, min_pstate, max_pstate);
2141}
2142
2143static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
2144{
2145 if (pstate == cpu->pstate.current_pstate)
2146 return;
2147
2148 cpu->pstate.current_pstate = pstate;
2149 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
2150}
2151
2152static void intel_pstate_adjust_pstate(struct cpudata *cpu)
2153{
2154 int from = cpu->pstate.current_pstate;
2155 struct sample *sample;
2156 int target_pstate;
2157
2158 update_turbo_state();
2159
2160 target_pstate = get_target_pstate(cpu);
2161 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2162 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
2163 intel_pstate_update_pstate(cpu, target_pstate);
2164
2165 sample = &cpu->sample;
2166 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
2167 fp_toint(sample->busy_scaled),
2168 from,
2169 cpu->pstate.current_pstate,
2170 sample->mperf,
2171 sample->aperf,
2172 sample->tsc,
2173 get_avg_frequency(cpu),
2174 fp_toint(cpu->iowait_boost * 100));
2175}
2176
2177static void intel_pstate_update_util(struct update_util_data *data, u64 time,
2178 unsigned int flags)
2179{
2180 struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2181 u64 delta_ns;
2182
2183 /* Don't allow remote callbacks */
2184 if (smp_processor_id() != cpu->cpu)
2185 return;
2186
2187 delta_ns = time - cpu->last_update;
2188 if (flags & SCHED_CPUFREQ_IOWAIT) {
2189 /* Start over if the CPU may have been idle. */
2190 if (delta_ns > TICK_NSEC) {
2191 cpu->iowait_boost = ONE_EIGHTH_FP;
2192 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
2193 cpu->iowait_boost <<= 1;
2194 if (cpu->iowait_boost > int_tofp(1))
2195 cpu->iowait_boost = int_tofp(1);
2196 } else {
2197 cpu->iowait_boost = ONE_EIGHTH_FP;
2198 }
2199 } else if (cpu->iowait_boost) {
2200 /* Clear iowait_boost if the CPU may have been idle. */
2201 if (delta_ns > TICK_NSEC)
2202 cpu->iowait_boost = 0;
2203 else
2204 cpu->iowait_boost >>= 1;
2205 }
2206 cpu->last_update = time;
2207 delta_ns = time - cpu->sample.time;
2208 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
2209 return;
2210
2211 if (intel_pstate_sample(cpu, time))
2212 intel_pstate_adjust_pstate(cpu);
2213}
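
/*
 * With FRAC_BITS = 8, ONE_EIGHTH_FP = 32 (0.125 in fixed point), so
 * consecutive IOWAIT events double the boost through 0.125, 0.25, 0.5
 * and 1.0 (capped at int_tofp(1) = 256), while it is halved on each
 * update without IOWAIT and cleared after an idle period longer than
 * one tick.
 */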
2214
2215static struct pstate_funcs core_funcs = {
2216 .get_max = core_get_max_pstate,
2217 .get_max_physical = core_get_max_pstate_physical,
2218 .get_min = core_get_min_pstate,
2219 .get_turbo = core_get_turbo_pstate,
2220 .get_scaling = core_get_scaling,
2221 .get_val = core_get_val,
2222};
2223
2224static const struct pstate_funcs silvermont_funcs = {
2225 .get_max = atom_get_max_pstate,
2226 .get_max_physical = atom_get_max_pstate,
2227 .get_min = atom_get_min_pstate,
2228 .get_turbo = atom_get_turbo_pstate,
2229 .get_val = atom_get_val,
2230 .get_scaling = silvermont_get_scaling,
2231 .get_vid = atom_get_vid,
2232};
2233
2234static const struct pstate_funcs airmont_funcs = {
2235 .get_max = atom_get_max_pstate,
2236 .get_max_physical = atom_get_max_pstate,
2237 .get_min = atom_get_min_pstate,
2238 .get_turbo = atom_get_turbo_pstate,
2239 .get_val = atom_get_val,
2240 .get_scaling = airmont_get_scaling,
2241 .get_vid = atom_get_vid,
2242};
2243
2244static const struct pstate_funcs knl_funcs = {
2245 .get_max = core_get_max_pstate,
2246 .get_max_physical = core_get_max_pstate_physical,
2247 .get_min = core_get_min_pstate,
2248 .get_turbo = knl_get_turbo_pstate,
2249 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
2250 .get_scaling = core_get_scaling,
2251 .get_val = core_get_val,
2252};
2253
#define X86_MATCH(model, policy)					 \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_APERFMPERF, &policy)

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	X86_MATCH(SANDYBRIDGE,		core_funcs),
	X86_MATCH(SANDYBRIDGE_X,	core_funcs),
	X86_MATCH(ATOM_SILVERMONT,	silvermont_funcs),
	X86_MATCH(IVYBRIDGE,		core_funcs),
	X86_MATCH(HASWELL,		core_funcs),
	X86_MATCH(BROADWELL,		core_funcs),
	X86_MATCH(IVYBRIDGE_X,		core_funcs),
	X86_MATCH(HASWELL_X,		core_funcs),
	X86_MATCH(HASWELL_L,		core_funcs),
	X86_MATCH(HASWELL_G,		core_funcs),
	X86_MATCH(BROADWELL_G,		core_funcs),
	X86_MATCH(ATOM_AIRMONT,		airmont_funcs),
	X86_MATCH(SKYLAKE_L,		core_funcs),
	X86_MATCH(BROADWELL_X,		core_funcs),
	X86_MATCH(SKYLAKE,		core_funcs),
	X86_MATCH(BROADWELL_D,		core_funcs),
	X86_MATCH(XEON_PHI_KNL,		knl_funcs),
	X86_MATCH(XEON_PHI_KNM,		knl_funcs),
	X86_MATCH(ATOM_GOLDMONT,	core_funcs),
	X86_MATCH(ATOM_GOLDMONT_PLUS,	core_funcs),
	X86_MATCH(SKYLAKE_X,		core_funcs),
	X86_MATCH(COMETLAKE,		core_funcs),
	X86_MATCH(ICELAKE_X,		core_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(BROADWELL_D, core_funcs),
	X86_MATCH(BROADWELL_X, core_funcs),
	X86_MATCH(SKYLAKE_X, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	X86_MATCH(KABYLAKE, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
	X86_MATCH(SKYLAKE_X, core_funcs),
	X86_MATCH(SKYLAKE, core_funcs),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;

		cpu->cpu = cpunum;

		cpu->epp_default = -EINVAL;

		if (hwp_active) {
			const struct x86_cpu_id *id;

			intel_pstate_hwp_enable(cpu);

			id = x86_match_cpu(intel_pstate_hwp_boost_ids);
			if (id && intel_pstate_acpi_pm_profile_server())
				hwp_boost = true;
		}
	} else if (hwp_active) {
		/*
		 * Re-enable HWP in case this happens after a resume from ACPI
		 * S3 if the CPU was offline during the whole suspend/resume
		 * cycle.
		 */
		intel_pstate_hwp_reenable(cpu);
	}

	cpu->epp_powersave = -EINVAL;
	cpu->epp_policy = 0;

	intel_pstate_get_cpu_pstates(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (hwp_active && !hwp_boost)
		return;

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     (hwp_active ?
				      intel_pstate_update_util_hwp :
				      intel_pstate_update_util));
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_rcu();
}

static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
	return global.turbo_disabled || global.no_turbo ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}

static void intel_pstate_update_perf_limits(struct cpudata *cpu,
					    unsigned int policy_min,
					    unsigned int policy_max)
{
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int32_t max_policy_perf, min_policy_perf;

	max_policy_perf = policy_max / perf_ctl_scaling;
	if (policy_max == policy_min) {
		min_policy_perf = max_policy_perf;
	} else {
		min_policy_perf = policy_min / perf_ctl_scaling;
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	/*
	 * HWP needs some special consideration, because HWP_REQUEST uses
	 * abstract values to represent performance rather than pure ratios.
	 */
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);

		if (cpu->pstate.scaling != perf_ctl_scaling) {
			int scaling = cpu->pstate.scaling;
			int freq;

			freq = max_policy_perf * perf_ctl_scaling;
			max_policy_perf = DIV_ROUND_UP(freq, scaling);

			freq = min_policy_perf * perf_ctl_scaling;
			min_policy_perf = DIV_ROUND_UP(freq, scaling);
		}
	}

	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
		 cpu->cpu, min_policy_perf, max_policy_perf);

	/* Normalize user input to [min_perf, max_perf] */
	if (per_cpu_limits) {
		cpu->min_perf_ratio = min_policy_perf;
		cpu->max_perf_ratio = max_policy_perf;
	} else {
		int turbo_max = cpu->pstate.turbo_pstate;
		int32_t global_min, global_max;

		/* Global limits are in percent of the maximum turbo P-state. */
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);
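
		/*
		 * For example, with turbo_pstate 40 and max_perf_pct 75,
		 * global_max becomes DIV_ROUND_UP(40 * 75, 100) = 30.
		 */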

		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);
	}
	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active) {
		/*
		 * If hwp_boost was active before and has been turned off
		 * dynamically, the update util hook needs to be cleared here.
		 */
		if (!hwp_boost)
			intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_hwp_set(policy->cpu);
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	if (!hwp_active &&
	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}

static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	int max_freq;

	update_turbo_state();
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);
		max_freq = global.no_turbo || global.turbo_disabled ?
				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	} else {
		max_freq = intel_pstate_get_max_freq(cpu);
	}
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);

	intel_pstate_adjust_policy_max(cpu, policy);
}

static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);

	return 0;
}

static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d going offline\n", cpu->cpu);

	if (cpu->suspended)
		return 0;

	/*
	 * If the CPU is an SMT thread and it goes offline with the performance
	 * settings different from the minimum, it will prevent its sibling
	 * from getting to lower performance levels, so force the minimum
	 * performance on CPU offline to prevent that from happening.
	 */
	if (hwp_active)
		intel_pstate_hwp_offline(cpu);
	else
		intel_pstate_set_min_pstate(cpu);

	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d going online\n", cpu->cpu);

	intel_pstate_init_acpi_perf_limits(policy);

	if (hwp_active) {
		/*
		 * Re-enable HWP and clear the "suspended" flag to let "resume"
		 * know that it need not do that.
		 */
		intel_pstate_hwp_reenable(cpu);
		cpu->suspended = false;
	}

	return 0;
}

static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
	intel_pstate_clear_update_util_hook(policy->cpu);

	return intel_cpufreq_cpu_offline(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf_ratio = 0xFF;
	cpu->min_perf_ratio = 0;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
	update_turbo_state();
	global.turbo_disabled_mf = global.turbo_disabled;
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;

	intel_pstate_init_acpi_perf_limits(policy);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	/*
	 * Set the policy to powersave to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	policy->policy = CPUFREQ_POLICY_POWERSAVE;

	if (hwp_active) {
		struct cpudata *cpu = all_cpu_data[policy->cpu];

		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
	}

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_suspend,
	.resume		= intel_pstate_resume,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.offline	= intel_pstate_cpu_offline,
	.online		= intel_pstate_cpu_online,
	.update_limits	= intel_pstate_update_limits,
	.name		= "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	intel_pstate_verify_cpu_policy(cpu, policy);
	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	return 0;
}

2673/* Use of trace in passive mode:
2674 *
2675 * In passive mode the trace core_busy field (also known as the
2676 * performance field, and lablelled as such on the graphs; also known as
2677 * core_avg_perf) is not needed and so is re-assigned to indicate if the
2678 * driver call was via the normal or fast switch path. Various graphs
2679 * output from the intel_pstate_tracer.py utility that include core_busy
2680 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
2681 * so we use 10 to indicate the normal path through the driver, and
2682 * 90 to indicate the fast switch path through the driver.
2683 * The scaled_busy field is not used, and is set to 0.
2684 */

#define INTEL_PSTATE_TRACE_TARGET	10
#define INTEL_PSTATE_TRACE_FAST_SWITCH	90

static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
	struct sample *sample;

	if (!trace_pstate_sample_enabled())
		return;

	if (!intel_pstate_sample(cpu, ktime_get()))
		return;

	sample = &cpu->sample;
	trace_pstate_sample(trace_type,
			    0,
			    old_pstate,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    get_avg_frequency(cpu),
			    fp_toint(cpu->iowait_boost * 100));
}

static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
				     u32 desired, bool fast_switch)
{
	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	value &= ~HWP_DESIRED_PERF(~0L);
	value |= HWP_DESIRED_PERF(desired);

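	/* Skip the MSR update if the cached request value is unchanged. */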
	if (value == prev)
		return;

	WRITE_ONCE(cpu->hwp_req_cached, value);
	if (fast_switch)
		wrmsrl(MSR_HWP_REQUEST, value);
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}

static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
					  u32 target_pstate, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_IA32_PERF_CTL,
		       pstate_funcs.get_val(cpu, target_pstate));
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
}

static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
				       int target_pstate, bool fast_switch)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int old_pstate = cpu->pstate.current_pstate;

	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (hwp_active) {
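		/*
		 * With a strict target the HWP floor and ceiling are both
		 * pinned to the requested P-state; otherwise only the floor
		 * is, and HWP may range up to max_perf_ratio.
		 */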
		int max_pstate = policy->strict_target ?
					target_pstate : cpu->max_perf_ratio;

		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
					 fast_switch);
	} else if (target_pstate != old_pstate) {
		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
	}

	cpu->pstate.current_pstate = target_pstate;

	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
			    INTEL_PSTATE_TRACE_TARGET, old_pstate);

	return target_pstate;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);

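	/*
	 * CPUFREQ_RELATION_L asks for the lowest frequency at or above the
	 * target, hence the round-up; CPUFREQ_RELATION_H for the highest
	 * frequency at or below it, hence the round-down.
	 */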
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);

	freqs.new = target_pstate * cpu->pstate.scaling;

	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);

	return target_pstate * cpu->pstate.scaling;
}

static void intel_cpufreq_adjust_perf(unsigned int cpunum,
				      unsigned long min_perf,
				      unsigned long target_perf,
				      unsigned long capacity)
{
	struct cpudata *cpu = all_cpu_data[cpunum];
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	int old_pstate = cpu->pstate.current_pstate;
	int cap_pstate, min_pstate, max_pstate, target_pstate;

	update_turbo_state();
	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
					     HWP_HIGHEST_PERF(hwp_cap);

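	/*
	 * Map the schedutil-provided values from the [0, capacity] range to
	 * P-states in [0, cap_pstate], preserving the ratios.
	 */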
	/* Optimization: Avoid unnecessary divisions. */

	target_pstate = cap_pstate;
	if (target_perf < capacity)
		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);

	min_pstate = cap_pstate;
	if (min_perf < capacity)
		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);

	if (min_pstate < cpu->pstate.min_pstate)
		min_pstate = cpu->pstate.min_pstate;

	if (min_pstate < cpu->min_perf_ratio)
		min_pstate = cpu->min_perf_ratio;

	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
	if (max_pstate < min_pstate)
		max_pstate = min_pstate;

	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);

	intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);

	cpu->pstate.current_pstate = target_pstate;
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;
	struct cpudata *cpu;
	struct device *dev;
	int ret, freq;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	ret = __intel_pstate_cpu_init(policy);
	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto pstate_exit;
	}

	cpu = all_cpu_data[policy->cpu];

	if (hwp_active) {
		u64 value;

		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;

		intel_pstate_get_hwp_cap(cpu);

		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
		WRITE_ONCE(cpu->hwp_req_cached, value);

		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
	} else {
		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
	}

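	/*
	 * Seed the frequency QoS requests from the global percent limits,
	 * expressed relative to the maximum turbo frequency.
	 */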
	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);

	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
				   freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_req;
	}

	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);

	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
				   freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto remove_min_req;
	}

	policy->driver_data = req;

	return 0;

remove_min_req:
	freq_qos_remove_request(req);
free_req:
	kfree(req);
pstate_exit:
	intel_pstate_exit_perf_limits(policy);

	return ret;
}

static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;

	req = policy->driver_data;

	freq_qos_remove_request(req + 1);
	freq_qos_remove_request(req);
	kfree(req);

	return intel_pstate_cpu_exit(policy);
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_cpufreq_cpu_exit,
	.offline	= intel_cpufreq_cpu_offline,
	.online		= intel_pstate_cpu_online,
	.suspend	= intel_pstate_suspend,
	.resume		= intel_pstate_resume,
	.update_limits	= intel_pstate_update_limits,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *default_driver;

static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();

	intel_pstate_driver = NULL;
}

static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
	int ret;

	if (driver == &intel_pstate)
		intel_pstate_sysfs_expose_hwp_dynamic_boost();

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	global.min_perf_pct = min_perf_pct_min();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!intel_pstate_driver)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}

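/*
 * Handle writes to the driver's "status" sysfs attribute
 * (/sys/devices/system/cpu/intel_pstate/status). The valid values are
 * "off", "active" and "passive"; switching to "off" is not allowed while
 * HWP is active.
 */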
static int intel_pstate_update_status(const char *buf, size_t size)
{
	if (size == 3 && !strncmp(buf, "off", size)) {
		if (!intel_pstate_driver)
			return -EINVAL;

		if (hwp_active)
			return -EBUSY;

		cpufreq_unregister_driver(intel_pstate_driver);
		intel_pstate_driver_cleanup();
		return 0;
	}

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			cpufreq_unregister_driver(intel_pstate_driver);
		}

		return intel_pstate_register_driver(&intel_pstate);
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_cpufreq)
				return 0;

			cpufreq_unregister_driver(intel_pstate_driver);
			intel_pstate_sysfs_hide_hwp_dynamic_boost();
		}

		return intel_pstate_register_driver(&intel_cpufreq);
	}

	return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	pr_debug("ACPI _PSS not found\n");
	return true;
}

static bool __init intel_pstate_no_acpi_pcch(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		goto not_found;

	if (acpi_has_method(handle, "PCCH"))
		return false;

not_found:
	pr_debug("ACPI PCCH not found\n");
	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	pr_debug("ACPI _PPC not found\n");
	return false;
}

enum {
	PSS,
	PPC,
};

3155/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{ } /* End */
};

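/*
 * Bits 8 and 18 of MSR_MISC_PWR_MGMT indicate that P-states are controlled
 * in Out of Band mode by the platform firmware/hardware.
 */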
#define BITMASK_OOB (BIT(8) | BIT(18))

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	const struct x86_cpu_id *id;
	u64 misc_pwr;
	int idx;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & BITMASK_OOB) {
			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
			return true;
		}
	}

	idx = acpi_match_platform_list(plat_info);
	if (idx < 0)
		return false;

	switch (plat_info[idx].data) {
	case PSS:
		if (!intel_pstate_no_acpi_pss())
			return false;

		return intel_pstate_no_acpi_pcch();
	case PPC:
		return intel_pstate_has_acpi_ppc() && !force_load;
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

#define INTEL_PSTATE_HWP_BROADWELL	0x01

#define X86_MATCH_HWP(model, hwp_mode)					\
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_HWP, hwp_mode)

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(ANY, 0),
	{}
};

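/*
 * Bit 0 of MSR_PM_ENABLE is the HWP enable bit. Firmware may set it before
 * the OS boots; per the SDM, once set it can only be cleared by a reset.
 */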
static bool intel_pstate_hwp_is_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);
	return !!(value & 0x1);
}

static int __init intel_pstate_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	id = x86_match_cpu(hwp_support_ids);
	if (id) {
		bool hwp_forced = intel_pstate_hwp_is_enabled();

		if (hwp_forced)
			pr_info("HWP enabled by BIOS\n");
		else if (no_load)
			return -ENODEV;

		copy_cpu_funcs(&core_funcs);
		/*
		 * Avoid enabling HWP for processors without EPP support,
		 * because that indicates an incomplete HWP implementation,
		 * which is a corner case and generally problematic to
		 * support.
		 *
		 * If HWP is enabled already, though, there is no choice but to
		 * deal with it.
		 */
		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
			hwp_active++;
			hwp_mode_bdw = id->driver_data;
			intel_pstate.attr = hwp_cpufreq_attrs;
			intel_cpufreq.attr = hwp_cpufreq_attrs;
			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
			if (!default_driver)
				default_driver = &intel_pstate;

			goto hwp_cpu_matched;
		}
		pr_info("HWP not enabled\n");
	} else {
		if (no_load)
			return -ENODEV;

		id = x86_match_cpu(intel_pstate_cpu_ids);
		if (!id) {
			pr_info("CPU model not supported\n");
			return -ENODEV;
		}

		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
	}

	if (intel_pstate_msrs_not_valid()) {
		pr_info("Invalid MSRs\n");
		return -ENODEV;
	}
	/* Without HWP, start in the passive mode. */
	if (!default_driver)
		default_driver = &intel_cpufreq;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists()) {
		pr_info("P-states controlled by the platform\n");
		return -ENODEV;
	}

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
	if (!all_cpu_data)
		return -ENOMEM;

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver(default_driver);
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc) {
		intel_pstate_sysfs_remove();
		return rc;
	}

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id) {
			set_power_ctl_ee_state(false);
			pr_info("Disabling energy efficiency optimization\n");
		}

		pr_info("HWP enabled\n");
	} else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
	}

	return 0;
}
device_initcall(intel_pstate_init);

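/*
 * Parse the intel_pstate= early command line argument. The recognized
 * values are "disable", "active", "passive", "no_hwp", "force",
 * "hwp_only", "per_cpu_perf_limits" and, with ACPI support built in,
 * "support_acpi_ppc".
 */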
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	else if (!strcmp(str, "active"))
		default_driver = &intel_pstate;
	else if (!strcmp(str, "passive"))
		default_driver = &intel_cpufreq;

	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;

	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");