Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
  4 * Copyright (c) 2017, Intel Corporation.
  5 * All rights reserved.
  6 *
  7 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 
 
 
 
 
 
 
 
 
 
  8 */
  9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 10
 11#include <linux/cpufeature.h>
 12#include <linux/cpuhotplug.h>
 13#include <linux/init.h>
 14#include <linux/kernel.h>
 
 15#include <linux/topology.h>
 16#include <linux/workqueue.h>
 17
 
 18#include <asm/cpu_device_id.h>
 19#include <asm/intel-family.h>
 20
 21#define MSR_OC_MAILBOX			0x150
 22#define MSR_OC_MAILBOX_CMD_OFFSET	32
 23#define MSR_OC_MAILBOX_RSP_OFFSET	32
 24#define MSR_OC_MAILBOX_BUSY_BIT		63
 25#define OC_MAILBOX_FC_CONTROL_CMD	0x1C
 26
 27/*
 28 * Typical latency to get mail box response is ~3us, It takes +3 us to
 29 * process reading mailbox after issuing mailbox write on a Broadwell 3.4 GHz
 30 * system. So for most of the time, the first mailbox read should have the
 31 * response, but to avoid some boundary cases retry twice.
 32 */
 33#define OC_MAILBOX_RETRY_COUNT		2
 34
 35static int get_oc_core_priority(unsigned int cpu)
 36{
 37	u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
 38	int ret, i;
 39
 40	/* Issue favored core read command */
 41	value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
 42	/* Set the busy bit to indicate OS is trying to issue command */
 43	value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
 44	ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
 45	if (ret) {
 46		pr_debug("cpu %d OC mailbox write failed\n", cpu);
 47		return ret;
 48	}
 49
 50	for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
 51		ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
 52		if (ret) {
 53			pr_debug("cpu %d OC mailbox read failed\n", cpu);
 54			break;
 55		}
 56
 57		if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
 58			pr_debug("cpu %d OC mailbox still processing\n", cpu);
 59			ret = -EBUSY;
 60			continue;
 61		}
 62
 63		if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
 64			pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
 65			ret = -ENXIO;
 66			break;
 67		}
 68
 69		ret = value & 0xff;
 70		pr_debug("cpu %d max_ratio %d\n", cpu, ret);
 71		break;
 72	}
 73
 74	return ret;
 75}
 76
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_cpu_online() is called from the CPU online callback, so it
 * can't call sched_set_itmt_support() directly as that function will acquire
 * hotplug locks in its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}
 87
 88static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
 89
 90static int itmt_legacy_cpu_online(unsigned int cpu)
 91{
 92	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
 93	int priority;
 94
 95	priority = get_oc_core_priority(cpu);
 96	if (priority < 0)
 97		return 0;
 98
 99	sched_set_itmt_core_prio(priority, cpu);
100
101	/* Enable ITMT feature when a core with different priority is found */
102	if (max_highest_perf <= min_highest_perf) {
103		if (priority > max_highest_perf)
104			max_highest_perf = priority;
105
106		if (priority < min_highest_perf)
107			min_highest_perf = priority;
108
109		if (max_highest_perf > min_highest_perf)
110			schedule_work(&sched_itmt_work);
111	}
112
113	return 0;
114}
115
/* CPUs with legacy (non-HWP) Turbo Boost Max 3.0 support. */
#define ICPU(model)     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	ICPU(INTEL_FAM6_BROADWELL_X),
	ICPU(INTEL_FAM6_SKYLAKE_X),
	{}
};
123
124static int __init itmt_legacy_init(void)
125{
126	const struct x86_cpu_id *id;
127	int ret;
128
129	id = x86_match_cpu(itmt_legacy_cpu_ids);
130	if (!id)
131		return -ENODEV;
132
133	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
134				"platform/x86/turbo_max_3:online",
135				itmt_legacy_cpu_online,	NULL);
136	if (ret < 0)
137		return ret;
138
139	return 0;
140}
141late_initcall(itmt_legacy_init)
v4.17
 
  1/*
  2 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
  3 * Copyright (c) 2017, Intel Corporation.
  4 * All rights reserved.
  5 *
  6 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
  7 *
  8 * This program is free software; you can redistribute it and/or modify it
  9 * under the terms and conditions of the GNU General Public License,
 10 * version 2, as published by the Free Software Foundation.
 11 *
 12 * This program is distributed in the hope it will be useful, but WITHOUT
 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 15 * more details.
 16 *
 17 */
 18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 19
 
 
 
 20#include <linux/kernel.h>
 21#include <linux/init.h>
 22#include <linux/topology.h>
 23#include <linux/workqueue.h>
 24#include <linux/cpuhotplug.h>
 25#include <linux/cpufeature.h>
 26#include <asm/cpu_device_id.h>
 27#include <asm/intel-family.h>
 28
 29#define MSR_OC_MAILBOX			0x150
 30#define MSR_OC_MAILBOX_CMD_OFFSET	32
 31#define MSR_OC_MAILBOX_RSP_OFFSET	32
 32#define MSR_OC_MAILBOX_BUSY_BIT		63
 33#define OC_MAILBOX_FC_CONTROL_CMD	0x1C
 34
 35/*
 36 * Typical latency to get mail box response is ~3us, It takes +3 us to
 37 * process reading mailbox after issuing mailbox write on a Broadwell 3.4 GHz
 38 * system. So for most of the time, the first mailbox read should have the
 39 * response, but to avoid some boundary cases retry twice.
 40 */
 41#define OC_MAILBOX_RETRY_COUNT		2
 42
/*
 * Read the favored-core ("max ratio") value for @cpu from the
 * overclocking mailbox MSR.
 *
 * Returns the max ratio (low byte of the mailbox data) on success, or a
 * negative error code: the MSR access error, -EBUSY if the busy bit never
 * cleared within the retries, or -ENXIO if the command was rejected.
 */
static int get_oc_core_priority(unsigned int cpu)
{
	u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
	int ret, i;

	/* Issue favored core read command */
	value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
	/* Set the busy bit to indicate OS is trying to issue command */
	value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
	ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
	if (ret) {
		pr_debug("cpu %d OC mailbox write failed\n", cpu);
		return ret;
	}

	for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
		ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
		if (ret) {
			pr_debug("cpu %d OC mailbox read failed\n", cpu);
			break;
		}

		/* Busy bit still set: firmware has not produced a response. */
		if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
			pr_debug("cpu %d OC mailbox still processing\n", cpu);
			ret = -EBUSY;
			continue;
		}

		/* A non-zero response field means the command failed. */
		if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
			pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
			ret = -ENXIO;
			break;
		}

		/* Success: the low data byte carries this core's max ratio. */
		ret = value & 0xff;
		pr_debug("cpu %d max_ratio %d\n", cpu, ret);
		break;
	}

	return ret;
}
 84
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_cpu_online() is called from the CPU online callback, so it
 * can't call sched_set_itmt_support() directly as that function will acquire
 * hotplug locks in its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}
 95
 96static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
 97
/*
 * CPU hotplug online callback: read this CPU's favored-core priority and
 * hand it to the scheduler.  The first time two CPUs with different
 * priorities are seen, schedule the work item that enables ITMT support.
 */
static int itmt_legacy_cpu_online(unsigned int cpu)
{
	/* Highest/lowest priority observed across all onlined CPUs. */
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int priority;

	priority = get_oc_core_priority(cpu);
	/* A mailbox read failure is not fatal; just skip this CPU. */
	if (priority < 0)
		return 0;

	sched_set_itmt_core_prio(priority, cpu);

	/* Enable ITMT feature when a core with different priority is found */
	if (max_highest_perf <= min_highest_perf) {
		if (priority > max_highest_perf)
			max_highest_perf = priority;

		if (priority < min_highest_perf)
			min_highest_perf = priority;

		/*
		 * max > min can only become true once; after that the outer
		 * condition is false, so the work is scheduled exactly once.
		 */
		if (max_highest_perf > min_highest_perf)
			schedule_work(&sched_itmt_work);
	}

	return 0;
}
123
/* CPUs with legacy (non-HWP) Turbo Boost Max 3.0 support. */
#define ICPU(model)     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	ICPU(INTEL_FAM6_BROADWELL_X),
	ICPU(INTEL_FAM6_SKYLAKE_X),
	{}
};
131
/*
 * Register a CPU hotplug online callback on platforms that support
 * legacy (non-HWP) Turbo Boost Max 3.0.  Returns 0 on success,
 * -ENODEV on unsupported hardware, or the cpuhp registration error.
 */
static int __init itmt_legacy_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	id = x86_match_cpu(itmt_legacy_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * cpuhp_setup_state() returns a positive dynamic state id on
	 * success with CPUHP_AP_ONLINE_DYN; map success to 0.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/turbo_max_3:online",
				itmt_legacy_cpu_online,	NULL);
	if (ret < 0)
		return ret;

	return 0;
}
149late_initcall(itmt_legacy_init)