arch/x86/kernel/acpi/cstate.c at v3.1
 
/*
 * Copyright (C) 2005 Intel Corporation
 * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/acpi.h>
#include <asm/mwait.h>

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering C3 type state on
	 * P4, Core and beyond CPUs
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
			flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
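The Intel family/model test above works on fields the kernel pre-decodes into struct cpuinfo_x86. As a minimal standalone sketch of where those numbers come from (user-space C via the compiler's <cpuid.h>, not kernel code; the extended-family/extended-model composition follows the Intel SDM):

/* Standalone sketch: decode family/model/stepping from CPUID leaf 1. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int family   = (eax >> 8) & 0xf;
	unsigned int model    = (eax >> 4) & 0xf;
	unsigned int stepping = eax & 0xf;

	if (family == 0xf)			/* extended family, bits 27:20 */
		family += (eax >> 20) & 0xff;
	if (family == 0x6 || family == 0xf)	/* extended model, bits 19:16 */
		model += ((eax >> 16) & 0xf) << 4;

	/* Mirrors the check in acpi_processor_power_init_bm_check(). */
	if (family > 0xf || (family == 6 && model >= 0x0f))
		printf("ARB_DISABLE not needed (family 0x%x, model 0x%x, stepping 0x%x)\n",
		       family, model, stepping);
	return 0;
}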

/* The code below handles cstate entry with monitor-mwait pair on Intel */

struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Check whether this particular cx_type (in CST) is supported or not */
	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
		retval = -1;
		goto out;
	}

	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d "
			"state\n", cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
			cx->address);
out:
	return retval;
}
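To make the probe arithmetic concrete: the low nibble of the _CST-supplied hint selects an MWAIT substate and the next nibble selects the C-state code, while each 4-bit field of CPUID.05H:EDX advertises how many substates the hardware implements for one C-state code. A standalone sketch with hypothetical hint and EDX values (constants inlined to match MWAIT_SUBSTATE_SIZE and the 4-bit masks):

/* Standalone sketch: the substate check from the probe, on sample data. */
#include <stdio.h>

#define SUBSTATE_SIZE	4	/* MWAIT_SUBSTATE_SIZE */
#define SUBSTATE_MASK	0xf	/* MWAIT_SUBSTATE_MASK */
#define CSTATE_MASK	0xf	/* MWAIT_CSTATE_MASK */

int main(void)
{
	unsigned int hint = 0x20;	/* hypothetical _CST address: code 2, substate 0 */
	unsigned int edx = 0x00002220;	/* hypothetical CPUID.05H:EDX substate counts */
	unsigned int cstate_type, num_cstate_subtype;

	/* Code 2 in the hint selects the third 4-bit field of EDX. */
	cstate_type = ((hint >> SUBSTATE_SIZE) & CSTATE_MASK) + 1;
	num_cstate_subtype = (edx >> (cstate_type * SUBSTATE_SIZE)) & SUBSTATE_MASK;

	printf("type %u: %u substates in HW, hint wants substate %u -> %s\n",
	       cstate_type, num_cstate_subtype, hint & SUBSTATE_MASK,
	       num_cstate_subtype < (hint & SUBSTATE_MASK) ? "reject" : "ok");
	return 0;
}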

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure we are running on right CPU */

	retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
	if (retval == 0) {
		/* Use the hint in CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
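For orientation, the register being validated is the parsed FFH entry from the firmware's _CST package. Below is a hedged sketch of an entry that would pass both checks above; the field meanings follow the Intel Processor Vendor-Specific ACPI Interface Specification, the mirror struct only approximates include/acpi/processor.h's struct acpi_power_register, and every value is hypothetical:

#include <stdint.h>

/* Rough mirror of the kernel's struct acpi_power_register (sketch only). */
struct power_register_sketch {
	uint8_t  descriptor;
	uint8_t  length;
	uint8_t  space_id;
	uint8_t  bit_width;
	uint8_t  bit_offset;
	uint8_t  access_size;
	uint8_t  reserved;
	uint64_t address;
};

/* Hypothetical parsed _CST FFH entry that the probe would accept. */
static const struct power_register_sketch ffh_reg = {
	.space_id    = 0x7f,  /* ACPI_ADR_SPACE_FIXED_HARDWARE (FFH) */
	.bit_width   = 1,     /* FFH vendor code 1: Intel */
	.bit_offset  = 2,     /* FFH class code 2: NATIVE_CSTATE_BEYOND_HALT */
	.access_size = 0x3,   /* bit 1 set: BM_STS is checked (bm_sts_skip stays 0) */
	.address     = 0x20,  /* MWAIT hint, handed through as cx->address */
};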

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}
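The shape of mwait_idle_with_hints() (check the flag, arm the monitor, barrier, re-check, then wait) closes the window where need_resched becomes set after the first test but before the CPU is armed to observe writes to it. MONITOR/MWAIT are privileged, so as a hedged user-space analogue the sketch below lets futex stand in for the hardware monitor; FUTEX_WAIT re-reads the word and refuses to sleep if it has already changed, which is the same lost-wakeup guard:

/* Standalone analogue of the lost-wakeup guard in mwait_idle_with_hints(). */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static atomic_int flag;		/* plays the role of TIF_NEED_RESCHED */

static void *waker(void *unused)
{
	usleep(1000);
	atomic_store(&flag, 1);	/* like setting need_resched */
	syscall(SYS_futex, &flag, FUTEX_WAKE, 1, NULL, NULL, 0);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);

	/* Outer cheap check, as with the first !need_resched() test. */
	while (atomic_load(&flag) == 0) {
		/*
		 * FUTEX_WAIT re-reads the word and sleeps only if it is
		 * still 0: the same "re-check after arming" that smp_mb()
		 * plus the second need_resched() test gives __mwait(), so
		 * a wakeup between the check and the sleep cannot be lost.
		 */
		syscall(SYS_futex, &flag, FUTEX_WAIT, 0, NULL, NULL, 0);
	}

	pthread_join(t, NULL);
	printf("woken without an IPI-style kick\n");
	return 0;
}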

void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
	                      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);

static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);
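The same file at v5.9 follows. Relative to the v3.1 version above, the visible changes are: an SPDX license tag; <linux/export.h> in place of <linux/module.h> and <asm/special_insns.h> in place of <asm/acpi.h>; Centaur and Zhaoxin handling in the bm_check/bm_control setup; a FW_BUG warning when firmware advertises an MWAIT C-state for which the hardware reports no substates; call_on_cpu() instead of work_on_cpu() for the cross-CPU probe; a __cpuidle annotation on the entry path (mwait_idle_with_hints() is no longer defined in this file); and AMD being accepted alongside Intel in ffh_cstate_init().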
arch/x86/kernel/acpi/cstate.c at v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 Intel Corporation
 * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering C3 type state on
	 * P4, Core and beyond CPUs
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
			flags->bm_control = 0;
	/*
	 * For all recent Centaur CPUs, the ucode will make sure that each
	 * core can keep cache coherence with each other while entering C3
	 * type state. So, set bm_check to 1 to indicate that the kernel
	 * doesn't need to execute a cache flush operation (WBINVD) when
	 * entering C3 type state.
	 */
	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
		    c->x86_stepping >= 0x0e))
			flags->bm_check = 1;
	}

	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		/*
		 * All Zhaoxin CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
		/*
		 * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
		 * So, set bm_control to zero to indicate that ARB_DISABLE
		 * is not required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
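The vendor checks above (Intel, Centaur, Zhaoxin) key off c->x86_vendor, which the kernel derives from the CPUID leaf 0 vendor string. A standalone sketch of that derivation (the quoted strings are the conventional vendor identifiers; Zhaoxin's "  Shanghai  " is as matched by the kernel's zhaoxin.c, cited from memory):

/* Standalone sketch: the vendor string behind the X86_VENDOR_* checks. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	__get_cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* CPUID.0 returns the string in */
	memcpy(vendor + 4, &edx, 4);	/* EBX, EDX, ECX order */
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	/* e.g. "GenuineIntel", "CentaurHauls", "  Shanghai  " (Zhaoxin) */
	printf("vendor: \"%s\"\n", vendor);
	return 0;
}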

/* The code below handles cstate entry with monitor-mwait pair on Intel */

struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Check whether this particular cx_type (in CST) is supported or not */
	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	/* If the HW does not support any sub-states in this C-state */
	if (num_cstate_subtype == 0) {
		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
				cx->address, edx_part);
		retval = -1;
		goto out;
	}

	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d state\n",
			cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
			cx->address);
out:
	return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure we are running on right CPU */

	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
			     false);
	if (retval == 0) {
		/* Use the hint in CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
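One behavioral change from v3.1: the cross-CPU probe now goes through call_on_cpu() with direct = false. A hedged sketch of that helper's shape as I recall it from include/acpi/processor.h (verify against your tree; this is not part of cstate.c):

/* Sketch of the call_on_cpu() helper used above. */
static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
			      bool direct)
{
	/* Run inline when already pinned to the target CPU ... */
	if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
		return fn(arg);
	/* ... otherwise bounce through a workqueue item on that CPU. */
	return work_on_cpu(cpu, fn, arg);
}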

void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
	                      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
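The file's per-CPU table follows a common lifecycle: alloc_percpu() at init, per_cpu_ptr() at each use, free_percpu() at teardown. A minimal self-contained kernel-module sketch of the same pattern (module and symbol names are illustrative only, not from this file):

/* Minimal sketch of the alloc_percpu()/per_cpu_ptr() pattern used above. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

struct counter { unsigned long hits; };

static struct counter __percpu *counters;

static int __init percpu_sketch_init(void)
{
	int cpu;

	counters = alloc_percpu(struct counter);
	if (!counters)
		return -ENOMEM;

	/* Each CPU gets its own slot; per-CPU writes need no shared lock.
	 * (CPU-hotplug locking around the iteration elided for brevity.) */
	for_each_online_cpu(cpu)
		per_cpu_ptr(counters, cpu)->hits = 0;
	return 0;
}

static void __exit percpu_sketch_exit(void)
{
	free_percpu(counters);
}

module_init(percpu_sketch_init);
module_exit(percpu_sketch_exit);
MODULE_LICENSE("GPL");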

static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL &&
	    c->x86_vendor != X86_VENDOR_AMD)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);