v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
  4 *
  5 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  6 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  7 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
  8 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  9 *  			- Added processor hotplug support
 10 */
 11
 12#define pr_fmt(fmt) "ACPI: " fmt
 13
 14#include <linux/kernel.h>
 15#include <linux/module.h>
 16#include <linux/init.h>
 17#include <linux/cpufreq.h>
 18#include <linux/slab.h>
 19#include <linux/acpi.h>
 20#include <acpi/processor.h>
 21#ifdef CONFIG_X86
 22#include <asm/cpufeature.h>
 23#endif
 24
 25#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
 26
 27static DEFINE_MUTEX(performance_mutex);
 28
 29/*
 30 * _PPC support is implemented as a CPUfreq policy notifier:
 31 * This means each time a CPUfreq driver registered also with
 32 * the ACPI core is asked to change the speed policy, the maximum
 33 * value is adjusted so that it is within the platform limit.
 34 *
 35 * Also, when a new platform limit value is detected, the CPUfreq
 36 * policy is adjusted accordingly.
 37 */
 38
 39/* ignore_ppc:
 40 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 41 *       ignore _PPC
 42 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 43 *  1 -> ignore _PPC totally -> forced by user through boot param
 44 */
 45static int ignore_ppc = -1;
 46module_param(ignore_ppc, int, 0644);
 47MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
 48		 "limited by BIOS, this should help");
 49
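In practice the parameter above is usually forced from the kernel command line rather than changed at runtime; a minimal example, assuming this code is built into the ACPI processor driver under the module name "processor":

	processor.ignore_ppc=1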
 50static bool acpi_processor_ppc_in_use;
 51
 52static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 53{
 54	acpi_status status = 0;
 55	unsigned long long ppc = 0;
 56	s32 qos_value;
 57	int index;
 58	int ret;
 59
 60	if (!pr)
 61		return -EINVAL;
 62
 63	/*
 64	 * _PPC indicates the maximum state currently supported by the platform
 65	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
 66	 */
 67	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
 68	if (status != AE_NOT_FOUND) {
 69		acpi_processor_ppc_in_use = true;
 70
 71		if (ACPI_FAILURE(status)) {
 72			acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
 73			return -ENODEV;
 74		}
 75	}
 76
 77	index = ppc;
 78
 79	if (pr->performance_platform_limit == index ||
 80	    ppc >= pr->performance->state_count)
 81		return 0;
 82
 83	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
 84		 index, index ? "is" : "is not");
 85
 86	pr->performance_platform_limit = index;
 87
 88	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
 89		return 0;
 90
 91	/*
 92	 * If _PPC returns 0, it means that all of the available states can be
 93	 * used ("no limit").
 94	 */
 95	if (index == 0)
 96		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
 97	else
 98		qos_value = pr->performance->states[index].core_frequency * 1000;
 99
100	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
101	if (ret < 0) {
102		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
103			pr->id, ret);
104	}
105
106	return 0;
107}
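A worked example of the mapping implemented above, using hypothetical _PSS data:

	/*
	 * Hypothetical _PSS: {2400, 1800, 1200} MHz, and _PPC evaluates to 1.
	 * Then index = 1, state 0 (2400 MHz) is off limits, and
	 * qos_value = states[1].core_frequency * 1000 = 1800000 kHz.
	 * A _PPC of 0 would map to FREQ_QOS_MAX_DEFAULT_VALUE ("no limit").
	 */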
108
109#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
110/*
111 * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status
112 * @handle: ACPI processor handle
113 * @status: the status code of _PPC evaluation
114 *	0: success. OSPM is now using the performance state specified.
115 *	1: failure. OSPM has not changed the number of P-states in use
116 */
117static void acpi_processor_ppc_ost(acpi_handle handle, int status)
118{
119	if (acpi_has_method(handle, "_OST"))
120		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
121				  status, NULL);
122}
123
124void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
125{
126	int ret;
127
128	if (ignore_ppc || !pr->performance) {
129		/*
130		 * Only when it is notification event, the _OST object
131		 * will be evaluated. Otherwise it is skipped.
132		 */
133		if (event_flag)
134			acpi_processor_ppc_ost(pr->handle, 1);
135		return;
136	}
137
138	ret = acpi_processor_get_platform_limit(pr);
139	/*
140	 * Only when it is notification event, the _OST object
141	 * will be evaluated. Otherwise it is skipped.
142	 */
143	if (event_flag) {
144		if (ret < 0)
145			acpi_processor_ppc_ost(pr->handle, 1);
146		else
147			acpi_processor_ppc_ost(pr->handle, 0);
148	}
149	if (ret >= 0)
150		cpufreq_update_limits(pr->id);
151}
152
153int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
154{
155	struct acpi_processor *pr;
156
157	pr = per_cpu(processors, cpu);
158	if (!pr || !pr->performance || !pr->performance->state_count)
159		return -ENODEV;
160
161	*limit = pr->performance->states[pr->performance_platform_limit].
162		core_frequency * 1000;
163	return 0;
164}
165EXPORT_SYMBOL(acpi_processor_get_bios_limit);
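A minimal usage sketch for the helper exported above; the wrapper name is hypothetical, but it mirrors how a cpufreq driver's ->bios_limit() callback is expected to use it:

	static int example_bios_limit(int cpu, unsigned int *limit)
	{
		/* On success, *limit holds the BIOS-imposed maximum in kHz. */
		return acpi_processor_get_bios_limit(cpu, limit);
	}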
166
167void acpi_processor_ignore_ppc_init(void)
168{
169	if (ignore_ppc < 0)
170		ignore_ppc = 0;
171}
172
173void acpi_processor_ppc_init(struct cpufreq_policy *policy)
174{
175	unsigned int cpu;
176
177	for_each_cpu(cpu, policy->related_cpus) {
178		struct acpi_processor *pr = per_cpu(processors, cpu);
179		int ret;
180
181		if (!pr)
182			continue;
183
184		/*
185		 * Reset performance_platform_limit in case there is a stale
186		 * value in it, so as to make it match the "no limit" QoS value
187		 * below.
188		 */
189		pr->performance_platform_limit = 0;
190
191		ret = freq_qos_add_request(&policy->constraints,
192					   &pr->perflib_req, FREQ_QOS_MAX,
193					   FREQ_QOS_MAX_DEFAULT_VALUE);
194		if (ret < 0)
195			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
196			       cpu, ret);
197	}
198}
199
200void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
201{
202	unsigned int cpu;
203
204	for_each_cpu(cpu, policy->related_cpus) {
205		struct acpi_processor *pr = per_cpu(processors, cpu);
206
207		if (pr)
208			freq_qos_remove_request(&pr->perflib_req);
209	}
210}
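A hedged sketch of how the two helpers above are meant to be paired from a cpufreq driver's ->init() and ->exit() callbacks (the function names are illustrative):

	static int example_policy_init(struct cpufreq_policy *policy)
	{
		/* Adds one FREQ_QOS_MAX request per CPU covered by the policy. */
		acpi_processor_ppc_init(policy);
		return 0;
	}

	static int example_policy_exit(struct cpufreq_policy *policy)
	{
		/* Drops the requests added in ->init(). */
		acpi_processor_ppc_exit(policy);
		return 0;
	}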
211
212static int acpi_processor_get_performance_control(struct acpi_processor *pr)
213{
214	int result = 0;
215	acpi_status status = 0;
216	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
217	union acpi_object *pct = NULL;
218	union acpi_object obj = { 0 };
219
220	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
221	if (ACPI_FAILURE(status)) {
222		acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
223		return -ENODEV;
224	}
225
226	pct = (union acpi_object *)buffer.pointer;
227	if (!pct || pct->type != ACPI_TYPE_PACKAGE || pct->package.count != 2) {
228		pr_err("Invalid _PCT data\n");
229		result = -EFAULT;
230		goto end;
231	}
232
233	/*
234	 * control_register
235	 */
236
237	obj = pct->package.elements[0];
238
239	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
240	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
241		pr_err("Invalid _PCT data (control_register)\n");
242		result = -EFAULT;
243		goto end;
244	}
245	memcpy(&pr->performance->control_register, obj.buffer.pointer,
246	       sizeof(struct acpi_pct_register));
247
248	/*
249	 * status_register
250	 */
251
252	obj = pct->package.elements[1];
253
254	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
255	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
256		pr_err("Invalid _PCT data (status_register)\n");
257		result = -EFAULT;
258		goto end;
259	}
260
261	memcpy(&pr->performance->status_register, obj.buffer.pointer,
262	       sizeof(struct acpi_pct_register));
263
264end:
265	kfree(buffer.pointer);
266
267	return result;
268}
269
270#ifdef CONFIG_X86
271/*
272 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
273 * in their ACPI data. Calculate the real values and fix up the _PSS data.
274 */
275static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
276{
277	u32 hi, lo, fid, did;
278	int index = px->control & 0x00000007;
279
280	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
281		return;
282
283	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) ||
284	    boot_cpu_data.x86 == 0x11) {
285		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
286		/*
287		 * MSR C001_0064+:
288		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
289		 */
290		if (!(hi & BIT(31)))
291			return;
292
293		fid = lo & 0x3f;
294		did = (lo >> 6) & 7;
295		if (boot_cpu_data.x86 == 0x10)
296			px->core_frequency = (100 * (fid + 0x10)) >> did;
297		else
298			px->core_frequency = (100 * (fid + 8)) >> did;
299	}
300}
301#else
302static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
303#endif
304
305static int acpi_processor_get_performance_states(struct acpi_processor *pr)
306{
307	int result = 0;
308	acpi_status status = AE_OK;
309	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
310	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
311	struct acpi_buffer state = { 0, NULL };
312	union acpi_object *pss = NULL;
313	int i;
314	int last_invalid = -1;
315
316	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
317	if (ACPI_FAILURE(status)) {
318		acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
319		return -ENODEV;
320	}
321
322	pss = buffer.pointer;
323	if (!pss || pss->type != ACPI_TYPE_PACKAGE) {
324		pr_err("Invalid _PSS data\n");
325		result = -EFAULT;
326		goto end;
327	}
328
329	acpi_handle_debug(pr->handle, "Found %d performance states\n",
330			  pss->package.count);
331
332	pr->performance->state_count = pss->package.count;
333	pr->performance->states =
334	    kmalloc_array(pss->package.count,
335			  sizeof(struct acpi_processor_px),
336			  GFP_KERNEL);
337	if (!pr->performance->states) {
338		result = -ENOMEM;
339		goto end;
340	}
341
342	for (i = 0; i < pr->performance->state_count; i++) {
343
344		struct acpi_processor_px *px = &(pr->performance->states[i]);
345
346		state.length = sizeof(struct acpi_processor_px);
347		state.pointer = px;
348
349		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);
350
351		status = acpi_extract_package(&(pss->package.elements[i]),
352					      &format, &state);
353		if (ACPI_FAILURE(status)) {
354			acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
355					 acpi_format_exception(status));
356			result = -EFAULT;
357			kfree(pr->performance->states);
358			goto end;
359		}
360
361		amd_fixup_frequency(px, i);
362
363		acpi_handle_debug(pr->handle,
364				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
365				  i,
366				  (u32) px->core_frequency,
367				  (u32) px->power,
368				  (u32) px->transition_latency,
369				  (u32) px->bus_master_latency,
370				  (u32) px->control, (u32) px->status);
371
372		/*
373		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
374		 */
375		if (!px->core_frequency ||
376		    (u32)(px->core_frequency * 1000) != px->core_frequency * 1000) {
377			pr_err(FW_BUG
378			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
379			       pr->id, px->core_frequency);
380			if (last_invalid == -1)
381				last_invalid = i;
382		} else {
383			if (last_invalid != -1) {
384				/*
385				 * Copy this valid entry over last_invalid entry
386				 */
387				memcpy(&(pr->performance->states[last_invalid]),
388				       px, sizeof(struct acpi_processor_px));
389				++last_invalid;
390			}
391		}
392	}
393
394	if (last_invalid == 0) {
395		pr_err(FW_BUG
396			   "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
397		result = -EFAULT;
398		kfree(pr->performance->states);
399		pr->performance->states = NULL;
400	}
401
402	if (last_invalid > 0)
403		pr->performance->state_count = last_invalid;
404
405end:
406	kfree(buffer.pointer);
407
408	return result;
409}
410
411int acpi_processor_get_performance_info(struct acpi_processor *pr)
412{
413	int result = 0;
414
415	if (!pr || !pr->performance || !pr->handle)
416		return -EINVAL;
417
418	if (!acpi_has_method(pr->handle, "_PCT")) {
419		acpi_handle_debug(pr->handle,
420				  "ACPI-based processor performance control unavailable\n");
421		return -ENODEV;
422	}
423
424	result = acpi_processor_get_performance_control(pr);
425	if (result)
426		goto update_bios;
427
428	result = acpi_processor_get_performance_states(pr);
429	if (result)
430		goto update_bios;
431
432	/* We need to call _PPC once when cpufreq starts */
433	if (ignore_ppc != 1)
434		result = acpi_processor_get_platform_limit(pr);
435
436	return result;
437
438	/*
439	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
440	 * the BIOS is older than the CPU and does not know its frequencies
441	 */
442 update_bios:
443#ifdef CONFIG_X86
444	if (acpi_has_method(pr->handle, "_PPC")) {
 445		if (boot_cpu_has(X86_FEATURE_EST))
446			pr_warn(FW_BUG "BIOS needs update for CPU "
447			       "frequency support\n");
448	}
449#endif
450	return result;
451}
452EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
453
454int acpi_processor_pstate_control(void)
455{
456	acpi_status status;
457
458	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
459		return 0;
460
461	pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
462		 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);
463
464	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
465				    (u32)acpi_gbl_FADT.pstate_control, 8);
466	if (ACPI_SUCCESS(status))
467		return 1;
468
469	pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
470		acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
471		acpi_format_exception(status));
472	return -EIO;
473}
474
475int acpi_processor_notify_smm(struct module *calling_module)
476{
477	static int is_done;
478	int result = 0;
479
480	if (!acpi_processor_cpufreq_init)
481		return -EBUSY;
482
483	if (!try_module_get(calling_module))
484		return -EINVAL;
485
486	/*
487	 * is_done is set to negative if an error occurs and to 1 if no error
 488	 * occurs, but SMM has been notified already. This avoids repeated
489	 * notification which might lead to unexpected results.
490	 */
491	if (is_done != 0) {
492		if (is_done < 0)
493			result = is_done;
494
495		goto out_put;
496	}
497
498	result = acpi_processor_pstate_control();
499	if (result <= 0) {
500		if (result) {
501			is_done = result;
502		} else {
503			pr_debug("No SMI port or pstate_control\n");
504			is_done = 1;
505		}
506		goto out_put;
507	}
508
509	is_done = 1;
510	/*
 511	 * Success. If there is _PPC, unloading the cpufreq driver would be risky,
512	 * so disallow it in that case.
513	 */
514	if (acpi_processor_ppc_in_use)
515		return 0;
516
517out_put:
518	module_put(calling_module);
519	return result;
520}
521EXPORT_SYMBOL(acpi_processor_notify_smm);
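A minimal sketch of the intended calling convention for acpi_processor_notify_smm(); the surrounding function is hypothetical:

	static int example_policy_setup(struct cpufreq_policy *policy)
	{
		/*
		 * Ask the firmware to hand P-state control to the OS.  Most
		 * callers ignore the return value: a failure only means that
		 * SMM keeps control of P-state transitions.
		 */
		acpi_processor_notify_smm(THIS_MODULE);
		return 0;
	}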
522
523int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
524{
525	int result = 0;
526	acpi_status status = AE_OK;
527	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
528	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
529	struct acpi_buffer state = {0, NULL};
530	union acpi_object  *psd = NULL;
531
532	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
533	if (ACPI_FAILURE(status)) {
534		return -ENODEV;
535	}
536
537	psd = buffer.pointer;
538	if (!psd || psd->type != ACPI_TYPE_PACKAGE) {
539		pr_err("Invalid _PSD data\n");
540		result = -EFAULT;
541		goto end;
542	}
543
544	if (psd->package.count != 1) {
545		pr_err("Invalid _PSD data\n");
546		result = -EFAULT;
547		goto end;
548	}
549
550	state.length = sizeof(struct acpi_psd_package);
551	state.pointer = pdomain;
552
553	status = acpi_extract_package(&(psd->package.elements[0]), &format, &state);
554	if (ACPI_FAILURE(status)) {
555		pr_err("Invalid _PSD data\n");
556		result = -EFAULT;
557		goto end;
558	}
559
560	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
561		pr_err("Unknown _PSD:num_entries\n");
562		result = -EFAULT;
563		goto end;
564	}
565
566	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
567		pr_err("Unknown _PSD:revision\n");
568		result = -EFAULT;
569		goto end;
570	}
571
572	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
573	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
574	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
575		pr_err("Invalid _PSD:coord_type\n");
576		result = -EFAULT;
577		goto end;
578	}
579end:
580	kfree(buffer.pointer);
581	return result;
582}
583EXPORT_SYMBOL(acpi_processor_get_psd);
584
585int acpi_processor_preregister_performance(
586		struct acpi_processor_performance __percpu *performance)
587{
588	int count_target;
589	int retval = 0;
590	unsigned int i, j;
591	cpumask_var_t covered_cpus;
592	struct acpi_processor *pr;
593	struct acpi_psd_package *pdomain;
594	struct acpi_processor *match_pr;
595	struct acpi_psd_package *match_pdomain;
596
597	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
598		return -ENOMEM;
599
600	mutex_lock(&performance_mutex);
601
602	/*
603	 * Check if another driver has already registered, and abort before
604	 * changing pr->performance if it has. Check input data as well.
605	 */
606	for_each_possible_cpu(i) {
607		pr = per_cpu(processors, i);
608		if (!pr) {
609			/* Look only at processors in ACPI namespace */
610			continue;
611		}
612
613		if (pr->performance) {
614			retval = -EBUSY;
615			goto err_out;
616		}
617
618		if (!performance || !per_cpu_ptr(performance, i)) {
619			retval = -EINVAL;
620			goto err_out;
621		}
622	}
623
624	/* Call _PSD for all CPUs */
625	for_each_possible_cpu(i) {
626		pr = per_cpu(processors, i);
627		if (!pr)
628			continue;
629
630		pr->performance = per_cpu_ptr(performance, i);
631		pdomain = &(pr->performance->domain_info);
632		if (acpi_processor_get_psd(pr->handle, pdomain)) {
633			retval = -EINVAL;
634			continue;
635		}
636	}
637	if (retval)
638		goto err_ret;
639
640	/*
 641	 * Now that we have _PSD data from all CPUs, let's set up P-state
642	 * domain info.
643	 */
644	for_each_possible_cpu(i) {
645		pr = per_cpu(processors, i);
646		if (!pr)
647			continue;
648
649		if (cpumask_test_cpu(i, covered_cpus))
650			continue;
651
652		pdomain = &(pr->performance->domain_info);
653		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
654		cpumask_set_cpu(i, covered_cpus);
655		if (pdomain->num_processors <= 1)
656			continue;
657
658		/* Validate the Domain info */
659		count_target = pdomain->num_processors;
660		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
661			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
662		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
663			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
664		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
665			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
666
667		for_each_possible_cpu(j) {
668			if (i == j)
669				continue;
670
671			match_pr = per_cpu(processors, j);
672			if (!match_pr)
673				continue;
674
675			match_pdomain = &(match_pr->performance->domain_info);
676			if (match_pdomain->domain != pdomain->domain)
677				continue;
678
679			/* Here i and j are in the same domain */
680
681			if (match_pdomain->num_processors != count_target) {
682				retval = -EINVAL;
683				goto err_ret;
684			}
685
686			if (pdomain->coord_type != match_pdomain->coord_type) {
687				retval = -EINVAL;
688				goto err_ret;
689			}
690
691			cpumask_set_cpu(j, covered_cpus);
692			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
693		}
694
695		for_each_possible_cpu(j) {
696			if (i == j)
697				continue;
698
699			match_pr = per_cpu(processors, j);
700			if (!match_pr)
701				continue;
702
703			match_pdomain = &(match_pr->performance->domain_info);
704			if (match_pdomain->domain != pdomain->domain)
705				continue;
706
707			match_pr->performance->shared_type =
708					pr->performance->shared_type;
709			cpumask_copy(match_pr->performance->shared_cpu_map,
710				     pr->performance->shared_cpu_map);
711		}
712	}
713
714err_ret:
715	for_each_possible_cpu(i) {
716		pr = per_cpu(processors, i);
717		if (!pr || !pr->performance)
718			continue;
719
720		/* Assume no coordination on any error parsing domain info */
721		if (retval) {
722			cpumask_clear(pr->performance->shared_cpu_map);
723			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
724			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
725		}
726		pr->performance = NULL; /* Will be set for real in register */
727	}
728
729err_out:
730	mutex_unlock(&performance_mutex);
731	free_cpumask_var(covered_cpus);
732	return retval;
733}
734EXPORT_SYMBOL(acpi_processor_preregister_performance);
735
736int acpi_processor_register_performance(struct acpi_processor_performance
737					*performance, unsigned int cpu)
738{
739	struct acpi_processor *pr;
740
741	if (!acpi_processor_cpufreq_init)
742		return -EINVAL;
743
744	mutex_lock(&performance_mutex);
745
746	pr = per_cpu(processors, cpu);
747	if (!pr) {
748		mutex_unlock(&performance_mutex);
749		return -ENODEV;
750	}
751
752	if (pr->performance) {
753		mutex_unlock(&performance_mutex);
754		return -EBUSY;
755	}
756
757	WARN_ON(!performance);
758
759	pr->performance = performance;
760
761	if (acpi_processor_get_performance_info(pr)) {
762		pr->performance = NULL;
763		mutex_unlock(&performance_mutex);
764		return -EIO;
765	}
766
767	mutex_unlock(&performance_mutex);
768	return 0;
769}
770EXPORT_SYMBOL(acpi_processor_register_performance);
771
772void acpi_processor_unregister_performance(unsigned int cpu)
773{
774	struct acpi_processor *pr;
775
776	mutex_lock(&performance_mutex);
777
778	pr = per_cpu(processors, cpu);
779	if (!pr)
780		goto unlock;
781
782	if (pr->performance)
783		kfree(pr->performance->states);
784
785	pr->performance = NULL;
786
787unlock:
788	mutex_unlock(&performance_mutex);
789}
790EXPORT_SYMBOL(acpi_processor_unregister_performance);
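Taken together, a frequency driver would typically use the registration interfaces in this file in the order sketched below. This is an illustrative outline with made-up names, not an excerpt from an existing driver; a real driver would also call acpi_processor_preregister_performance() first if it needs the _PSD dependency domains validated:

	static struct acpi_processor_performance __percpu *example_perf;

	static int __init example_driver_load(void)
	{
		unsigned int cpu = 0;	/* illustrative: register CPU 0 only */
		int ret;

		example_perf = alloc_percpu(struct acpi_processor_performance);
		if (!example_perf)
			return -ENOMEM;

		/* Evaluates _PCT, _PSS and the initial _PPC for this CPU. */
		ret = acpi_processor_register_performance(per_cpu_ptr(example_perf, cpu),
							  cpu);
		if (ret) {
			free_percpu(example_perf);
			return ret;
		}

		return 0;
	}

	static void __exit example_driver_unload(void)
	{
		/* Frees the _PSS table allocated during registration. */
		acpi_processor_unregister_performance(0);
		free_percpu(example_perf);
	}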
v4.6
 
  1/*
  2 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
  3 *
  4 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
  7 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  8 *  			- Added processor hotplug support
  9 *
 10 *
 11 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 12 *
 13 *  This program is free software; you can redistribute it and/or modify
 14 *  it under the terms of the GNU General Public License as published by
 15 *  the Free Software Foundation; either version 2 of the License, or (at
 16 *  your option) any later version.
 17 *
 18 *  This program is distributed in the hope that it will be useful, but
 19 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 20 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 21 *  General Public License for more details.
 22 *
 23 */
 24
 25#include <linux/kernel.h>
 26#include <linux/module.h>
 27#include <linux/init.h>
 28#include <linux/cpufreq.h>
 29#include <linux/slab.h>
 30#include <linux/acpi.h>
 31#include <acpi/processor.h>
 32#ifdef CONFIG_X86
 33#include <asm/cpufeature.h>
 34#endif
 35
 36#define PREFIX "ACPI: "
 37
 38#define ACPI_PROCESSOR_CLASS		"processor"
 39#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
 40#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
 41ACPI_MODULE_NAME("processor_perflib");
 42
 43static DEFINE_MUTEX(performance_mutex);
 44
 45/*
 46 * _PPC support is implemented as a CPUfreq policy notifier:
 47 * This means each time a CPUfreq driver registered also with
 48 * the ACPI core is asked to change the speed policy, the maximum
 49 * value is adjusted so that it is within the platform limit.
 50 *
 51 * Also, when a new platform limit value is detected, the CPUfreq
 52 * policy is adjusted accordingly.
 53 */
 54
 55/* ignore_ppc:
 56 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 57 *       ignore _PPC
 58 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 59 *  1 -> ignore _PPC totally -> forced by user through boot param
 60 */
 61static int ignore_ppc = -1;
 62module_param(ignore_ppc, int, 0644);
 63MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
 64		 "limited by BIOS, this should help");
 65
 66#define PPC_REGISTERED   1
 67#define PPC_IN_USE       2
 68
 69static int acpi_processor_ppc_status;
 70
 71static int acpi_processor_ppc_notifier(struct notifier_block *nb,
 72				       unsigned long event, void *data)
 73{
 74	struct cpufreq_policy *policy = data;
 75	struct acpi_processor *pr;
 76	unsigned int ppc = 0;
 77
 78	if (event == CPUFREQ_START && ignore_ppc <= 0) {
 79		ignore_ppc = 0;
 80		return 0;
 81	}
 82
 83	if (ignore_ppc)
 84		return 0;
 85
 86	if (event != CPUFREQ_ADJUST)
 87		return 0;
 88
 89	mutex_lock(&performance_mutex);
 90
 91	pr = per_cpu(processors, policy->cpu);
 92	if (!pr || !pr->performance)
 93		goto out;
 94
 95	ppc = (unsigned int)pr->performance_platform_limit;
 96
 97	if (ppc >= pr->performance->state_count)
 98		goto out;
 99
100	cpufreq_verify_within_limits(policy, 0,
101				     pr->performance->states[ppc].
102				     core_frequency * 1000);
103
104      out:
105	mutex_unlock(&performance_mutex);
106
107	return 0;
108}
109
110static struct notifier_block acpi_ppc_notifier_block = {
111	.notifier_call = acpi_processor_ppc_notifier,
112};
113
114static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
115{
116	acpi_status status = 0;
117	unsigned long long ppc = 0;
118
119
120	if (!pr)
121		return -EINVAL;
122
123	/*
124	 * _PPC indicates the maximum state currently supported by the platform
125	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
126	 */
127	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
128
129	if (status != AE_NOT_FOUND)
130		acpi_processor_ppc_status |= PPC_IN_USE;
131
132	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
133		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
134		return -ENODEV;
135	}
136
137	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
138		       (int)ppc, ppc ? "" : "not");
139
140	pr->performance_platform_limit = (int)ppc;
141
142	return 0;
143}
144
145#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
146/*
147 * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status
148 * @handle: ACPI processor handle
149 * @status: the status code of _PPC evaluation
150 *	0: success. OSPM is now using the performance state specified.
151 *	1: failure. OSPM has not changed the number of P-states in use
152 */
153static void acpi_processor_ppc_ost(acpi_handle handle, int status)
154{
155	if (acpi_has_method(handle, "_OST"))
156		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
157				  status, NULL);
158}
159
160int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
161{
162	int ret;
163
164	if (ignore_ppc) {
165		/*
166		 * Only when it is notification event, the _OST object
167		 * will be evaluated. Otherwise it is skipped.
168		 */
169		if (event_flag)
170			acpi_processor_ppc_ost(pr->handle, 1);
171		return 0;
172	}
173
174	ret = acpi_processor_get_platform_limit(pr);
175	/*
176	 * Only when it is notification event, the _OST object
177	 * will be evaluated. Otherwise it is skipped.
178	 */
179	if (event_flag) {
180		if (ret < 0)
181			acpi_processor_ppc_ost(pr->handle, 1);
182		else
183			acpi_processor_ppc_ost(pr->handle, 0);
184	}
185	if (ret < 0)
186		return (ret);
187	else
188		return cpufreq_update_policy(pr->id);
189}
190
191int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
192{
193	struct acpi_processor *pr;
194
195	pr = per_cpu(processors, cpu);
196	if (!pr || !pr->performance || !pr->performance->state_count)
197		return -ENODEV;
198	*limit = pr->performance->states[pr->performance_platform_limit].
199		core_frequency * 1000;
200	return 0;
201}
202EXPORT_SYMBOL(acpi_processor_get_bios_limit);
203
204void acpi_processor_ppc_init(void)
205{
206	if (!cpufreq_register_notifier
207	    (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
208		acpi_processor_ppc_status |= PPC_REGISTERED;
209	else
210		printk(KERN_DEBUG
211		       "Warning: Processor Platform Limit not supported.\n");
212}
213
214void acpi_processor_ppc_exit(void)
215{
216	if (acpi_processor_ppc_status & PPC_REGISTERED)
217		cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
218					    CPUFREQ_POLICY_NOTIFIER);
219
220	acpi_processor_ppc_status &= ~PPC_REGISTERED;
221}
222
223static int acpi_processor_get_performance_control(struct acpi_processor *pr)
224{
225	int result = 0;
226	acpi_status status = 0;
227	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
228	union acpi_object *pct = NULL;
229	union acpi_object obj = { 0 };
230
231
232	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
233	if (ACPI_FAILURE(status)) {
234		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
235		return -ENODEV;
236	}
237
238	pct = (union acpi_object *)buffer.pointer;
239	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
240	    || (pct->package.count != 2)) {
241		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
242		result = -EFAULT;
243		goto end;
244	}
245
246	/*
247	 * control_register
248	 */
249
250	obj = pct->package.elements[0];
251
252	if ((obj.type != ACPI_TYPE_BUFFER)
253	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
254	    || (obj.buffer.pointer == NULL)) {
255		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
256		result = -EFAULT;
257		goto end;
258	}
259	memcpy(&pr->performance->control_register, obj.buffer.pointer,
260	       sizeof(struct acpi_pct_register));
261
262	/*
263	 * status_register
264	 */
265
266	obj = pct->package.elements[1];
267
268	if ((obj.type != ACPI_TYPE_BUFFER)
269	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
270	    || (obj.buffer.pointer == NULL)) {
271		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
272		result = -EFAULT;
273		goto end;
274	}
275
276	memcpy(&pr->performance->status_register, obj.buffer.pointer,
277	       sizeof(struct acpi_pct_register));
278
279      end:
280	kfree(buffer.pointer);
281
282	return result;
283}
284
285#ifdef CONFIG_X86
286/*
287 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
288 * in their ACPI data. Calculate the real values and fix up the _PSS data.
289 */
290static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
291{
292	u32 hi, lo, fid, did;
293	int index = px->control & 0x00000007;
294
295	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
296		return;
297
298	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
299	    || boot_cpu_data.x86 == 0x11) {
300		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
301		/*
302		 * MSR C001_0064+:
303		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
304		 */
305		if (!(hi & BIT(31)))
306			return;
307
308		fid = lo & 0x3f;
309		did = (lo >> 6) & 7;
310		if (boot_cpu_data.x86 == 0x10)
311			px->core_frequency = (100 * (fid + 0x10)) >> did;
312		else
313			px->core_frequency = (100 * (fid + 8)) >> did;
314	}
315}
316#else
317static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
318#endif
319
320static int acpi_processor_get_performance_states(struct acpi_processor *pr)
321{
322	int result = 0;
323	acpi_status status = AE_OK;
324	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
325	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
326	struct acpi_buffer state = { 0, NULL };
327	union acpi_object *pss = NULL;
328	int i;
329	int last_invalid = -1;
330
331
332	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
333	if (ACPI_FAILURE(status)) {
334		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
335		return -ENODEV;
336	}
337
338	pss = buffer.pointer;
339	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
340		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
341		result = -EFAULT;
342		goto end;
343	}
344
345	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
346			  pss->package.count));
347
348	pr->performance->state_count = pss->package.count;
349	pr->performance->states =
350	    kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
351		    GFP_KERNEL);
352	if (!pr->performance->states) {
353		result = -ENOMEM;
354		goto end;
355	}
356
357	for (i = 0; i < pr->performance->state_count; i++) {
358
359		struct acpi_processor_px *px = &(pr->performance->states[i]);
360
361		state.length = sizeof(struct acpi_processor_px);
362		state.pointer = px;
363
364		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
365
366		status = acpi_extract_package(&(pss->package.elements[i]),
367					      &format, &state);
368		if (ACPI_FAILURE(status)) {
369			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
370			result = -EFAULT;
371			kfree(pr->performance->states);
372			goto end;
373		}
374
375		amd_fixup_frequency(px, i);
376
377		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
378				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
379				  i,
380				  (u32) px->core_frequency,
381				  (u32) px->power,
382				  (u32) px->transition_latency,
383				  (u32) px->bus_master_latency,
384				  (u32) px->control, (u32) px->status));
385
386		/*
387 		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
388		 */
389		if (!px->core_frequency ||
390		    ((u32)(px->core_frequency * 1000) !=
391		     (px->core_frequency * 1000))) {
392			printk(KERN_ERR FW_BUG PREFIX
393			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
394			       pr->id, px->core_frequency);
395			if (last_invalid == -1)
396				last_invalid = i;
397		} else {
398			if (last_invalid != -1) {
399				/*
400				 * Copy this valid entry over last_invalid entry
401				 */
402				memcpy(&(pr->performance->states[last_invalid]),
403				       px, sizeof(struct acpi_processor_px));
404				++last_invalid;
405			}
406		}
407	}
408
409	if (last_invalid == 0) {
410		printk(KERN_ERR FW_BUG PREFIX
411		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
412		result = -EFAULT;
413		kfree(pr->performance->states);
414		pr->performance->states = NULL;
415	}
416
417	if (last_invalid > 0)
418		pr->performance->state_count = last_invalid;
419
420      end:
421	kfree(buffer.pointer);
422
423	return result;
424}
425
426int acpi_processor_get_performance_info(struct acpi_processor *pr)
427{
428	int result = 0;
429
430	if (!pr || !pr->performance || !pr->handle)
431		return -EINVAL;
432
433	if (!acpi_has_method(pr->handle, "_PCT")) {
434		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
435				  "ACPI-based processor performance control unavailable\n"));
436		return -ENODEV;
437	}
438
439	result = acpi_processor_get_performance_control(pr);
440	if (result)
441		goto update_bios;
442
443	result = acpi_processor_get_performance_states(pr);
444	if (result)
445		goto update_bios;
446
447	/* We need to call _PPC once when cpufreq starts */
448	if (ignore_ppc != 1)
449		result = acpi_processor_get_platform_limit(pr);
450
451	return result;
452
453	/*
454	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
455	 * the BIOS is older than the CPU and does not know its frequencies
456	 */
457 update_bios:
458#ifdef CONFIG_X86
459	if (acpi_has_method(pr->handle, "_PPC")) {
460		if(boot_cpu_has(X86_FEATURE_EST))
461			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
462			       "frequency support\n");
463	}
464#endif
465	return result;
466}
467EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
468int acpi_processor_notify_smm(struct module *calling_module)
 
469{
470	acpi_status status;
471	static int is_done = 0;
472
473
474	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
475		return -EBUSY;
476
477	if (!try_module_get(calling_module))
478		return -EINVAL;
479
480	/* is_done is set to negative if an error occurred,
481	 * and to positive if _no_ error occurred, but SMM
482	 * was already notified. This avoids double notification
483	 * which might lead to unexpected results...
484	 */
485	if (is_done > 0) {
486		module_put(calling_module);
487		return 0;
488	} else if (is_done < 0) {
489		module_put(calling_module);
490		return is_done;
491	}
492
493	is_done = -EIO;
494
495	/* Can't write pstate_control to smi_command if either value is zero */
496	if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
497		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
498		module_put(calling_module);
499		return 0;
500	}
501
502	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
503			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
504			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
505
506	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
507				    (u32) acpi_gbl_FADT.pstate_control, 8);
508	if (ACPI_FAILURE(status)) {
509		ACPI_EXCEPTION((AE_INFO, status,
510				"Failed to write pstate_control [0x%x] to "
511				"smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
512				acpi_gbl_FADT.smi_command));
513		module_put(calling_module);
514		return status;
515	}
516
517	/* Success. If there's no _PPC, we need to fear nothing, so
518	 * we can allow the cpufreq driver to be rmmod'ed. */
519	is_done = 1;
520
521	if (!(acpi_processor_ppc_status & PPC_IN_USE))
522		module_put(calling_module);
523
524	return 0;
525}
526
527EXPORT_SYMBOL(acpi_processor_notify_smm);
528
529static int acpi_processor_get_psd(struct acpi_processor	*pr)
530{
531	int result = 0;
532	acpi_status status = AE_OK;
533	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
534	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
535	struct acpi_buffer state = {0, NULL};
536	union acpi_object  *psd = NULL;
537	struct acpi_psd_package *pdomain;
538
539	status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
540	if (ACPI_FAILURE(status)) {
541		return -ENODEV;
542	}
543
544	psd = buffer.pointer;
545	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
546		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
547		result = -EFAULT;
548		goto end;
549	}
550
551	if (psd->package.count != 1) {
552		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
553		result = -EFAULT;
554		goto end;
555	}
556
557	pdomain = &(pr->performance->domain_info);
558
559	state.length = sizeof(struct acpi_psd_package);
560	state.pointer = pdomain;
561
562	status = acpi_extract_package(&(psd->package.elements[0]),
563		&format, &state);
564	if (ACPI_FAILURE(status)) {
565		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
566		result = -EFAULT;
567		goto end;
568	}
569
570	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
571		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
572		result = -EFAULT;
573		goto end;
574	}
575
576	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
577		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
578		result = -EFAULT;
579		goto end;
580	}
581
582	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
583	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
584	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
585		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
586		result = -EFAULT;
587		goto end;
588	}
589end:
590	kfree(buffer.pointer);
591	return result;
592}
593
594int acpi_processor_preregister_performance(
595		struct acpi_processor_performance __percpu *performance)
596{
597	int count_target;
598	int retval = 0;
599	unsigned int i, j;
600	cpumask_var_t covered_cpus;
601	struct acpi_processor *pr;
602	struct acpi_psd_package *pdomain;
603	struct acpi_processor *match_pr;
604	struct acpi_psd_package *match_pdomain;
605
606	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
607		return -ENOMEM;
608
609	mutex_lock(&performance_mutex);
610
611	/*
612	 * Check if another driver has already registered, and abort before
613	 * changing pr->performance if it has. Check input data as well.
614	 */
615	for_each_possible_cpu(i) {
616		pr = per_cpu(processors, i);
617		if (!pr) {
618			/* Look only at processors in ACPI namespace */
619			continue;
620		}
621
622		if (pr->performance) {
623			retval = -EBUSY;
624			goto err_out;
625		}
626
627		if (!performance || !per_cpu_ptr(performance, i)) {
628			retval = -EINVAL;
629			goto err_out;
630		}
631	}
632
633	/* Call _PSD for all CPUs */
634	for_each_possible_cpu(i) {
635		pr = per_cpu(processors, i);
636		if (!pr)
637			continue;
638
639		pr->performance = per_cpu_ptr(performance, i);
640		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
641		if (acpi_processor_get_psd(pr)) {
642			retval = -EINVAL;
643			continue;
644		}
645	}
646	if (retval)
647		goto err_ret;
648
649	/*
650	 * Now that we have _PSD data from all CPUs, let's set up P-state
651	 * domain info.
652	 */
653	for_each_possible_cpu(i) {
654		pr = per_cpu(processors, i);
655		if (!pr)
656			continue;
657
658		if (cpumask_test_cpu(i, covered_cpus))
659			continue;
660
661		pdomain = &(pr->performance->domain_info);
662		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
663		cpumask_set_cpu(i, covered_cpus);
664		if (pdomain->num_processors <= 1)
665			continue;
666
667		/* Validate the Domain info */
668		count_target = pdomain->num_processors;
669		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
670			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
671		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
672			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
673		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
674			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
675
676		for_each_possible_cpu(j) {
677			if (i == j)
678				continue;
679
680			match_pr = per_cpu(processors, j);
681			if (!match_pr)
682				continue;
683
684			match_pdomain = &(match_pr->performance->domain_info);
685			if (match_pdomain->domain != pdomain->domain)
686				continue;
687
688			/* Here i and j are in the same domain */
689
690			if (match_pdomain->num_processors != count_target) {
691				retval = -EINVAL;
692				goto err_ret;
693			}
694
695			if (pdomain->coord_type != match_pdomain->coord_type) {
696				retval = -EINVAL;
697				goto err_ret;
698			}
699
700			cpumask_set_cpu(j, covered_cpus);
701			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
702		}
703
704		for_each_possible_cpu(j) {
705			if (i == j)
706				continue;
707
708			match_pr = per_cpu(processors, j);
709			if (!match_pr)
710				continue;
711
712			match_pdomain = &(match_pr->performance->domain_info);
713			if (match_pdomain->domain != pdomain->domain)
714				continue;
715
716			match_pr->performance->shared_type = 
717					pr->performance->shared_type;
718			cpumask_copy(match_pr->performance->shared_cpu_map,
719				     pr->performance->shared_cpu_map);
720		}
721	}
722
723err_ret:
724	for_each_possible_cpu(i) {
725		pr = per_cpu(processors, i);
726		if (!pr || !pr->performance)
727			continue;
728
729		/* Assume no coordination on any error parsing domain info */
730		if (retval) {
731			cpumask_clear(pr->performance->shared_cpu_map);
732			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
733			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
734		}
735		pr->performance = NULL; /* Will be set for real in register */
736	}
737
738err_out:
739	mutex_unlock(&performance_mutex);
740	free_cpumask_var(covered_cpus);
741	return retval;
742}
743EXPORT_SYMBOL(acpi_processor_preregister_performance);
744
745int
746acpi_processor_register_performance(struct acpi_processor_performance
747				    *performance, unsigned int cpu)
748{
749	struct acpi_processor *pr;
750
751	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
752		return -EINVAL;
753
754	mutex_lock(&performance_mutex);
755
756	pr = per_cpu(processors, cpu);
757	if (!pr) {
758		mutex_unlock(&performance_mutex);
759		return -ENODEV;
760	}
761
762	if (pr->performance) {
763		mutex_unlock(&performance_mutex);
764		return -EBUSY;
765	}
766
767	WARN_ON(!performance);
768
769	pr->performance = performance;
770
771	if (acpi_processor_get_performance_info(pr)) {
772		pr->performance = NULL;
773		mutex_unlock(&performance_mutex);
774		return -EIO;
775	}
776
777	mutex_unlock(&performance_mutex);
778	return 0;
779}
780
781EXPORT_SYMBOL(acpi_processor_register_performance);
782
783void acpi_processor_unregister_performance(unsigned int cpu)
784{
785	struct acpi_processor *pr;
786
787	mutex_lock(&performance_mutex);
788
789	pr = per_cpu(processors, cpu);
790	if (!pr) {
791		mutex_unlock(&performance_mutex);
792		return;
793	}
794
795	if (pr->performance)
796		kfree(pr->performance->states);
797	pr->performance = NULL;
798
799	mutex_unlock(&performance_mutex);
800
801	return;
802}
803
804EXPORT_SYMBOL(acpi_processor_unregister_performance);