// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI core
 * is asked to change the speed policy, the maximum value is adjusted so
 * that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *	 ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
		 "limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

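/*
 * acpi_processor_get_platform_limit - Evaluate _PPC and update the QoS limit.
 * @pr: ACPI processor to query.
 *
 * Read the platform's current performance limit from _PPC and, if it has
 * changed, cache it in @pr and adjust the corresponding maximum-frequency
 * QoS request accordingly.
 */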
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	s32 qos_value;
	int index;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
	if (status != AE_NOT_FOUND) {
		acpi_processor_ppc_in_use = true;

		if (ACPI_FAILURE(status)) {
			acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
			return -ENODEV;
		}
	}

	index = ppc;

	if (pr->performance_platform_limit == index ||
	    ppc >= pr->performance->state_count)
		return 0;

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 index, index ? "is" : "is not");

	pr->performance_platform_limit = index;

	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/*
	 * If _PPC returns 0, it means that all of the available states can be
	 * used ("no limit").
	 */
	if (index == 0)
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else
		qos_value = pr->performance->states[index].core_frequency * 1000;

	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status.
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	    0: success. OSPM is now using the performance state specified.
 *	    1: failure. OSPM has not changed the number of P-states in use.
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

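/*
 * acpi_processor_ppc_has_changed - Process a platform limit change.
 * @pr: ACPI processor whose _PPC may have changed.
 * @event_flag: Nonzero if invoked for a notification event, in which case
 *		the platform is given _OST feedback on the outcome.
 *
 * Re-evaluate _PPC and, on success, ask cpufreq to refresh its limits.
 */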
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated for notification events only;
		 * otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated for notification events only;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}

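/*
 * acpi_processor_get_bios_limit - Report the current platform frequency
 * limit for a CPU.
 * @cpu: CPU to query.
 * @limit: Where to store the limit, in kHz.
 *
 * Convert the cached _PPC state index into the corresponding _PSS core
 * frequency (MHz * 1000).
 */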
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

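/*
 * acpi_processor_ppc_init - Add maximum-frequency QoS requests for a policy.
 * @policy: cpufreq policy being initialized.
 *
 * Add a "no limit" FREQ_QOS_MAX request for every CPU covered by @policy;
 * the request is tightened later when _PPC reports a platform limit.
 */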
void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		/*
		 * Reset performance_platform_limit in case there is a stale
		 * value in it, so as to make it match the "no limit" QoS value
		 * below.
		 */
		pr->performance_platform_limit = 0;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

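/*
 * acpi_processor_ppc_exit - Remove the QoS requests added by
 * acpi_processor_ppc_init().
 * @policy: cpufreq policy going away.
 */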
void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}

#ifdef CONFIG_X86

static DEFINE_MUTEX(performance_mutex);

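/*
 * acpi_processor_get_performance_control - Evaluate _PCT and copy out the
 * performance control and status registers.
 * @pr: ACPI processor to query.
 *
 * _PCT is a package of two buffers describing the control and status
 * registers used for P-state transitions.
 */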
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || pct->type != ACPI_TYPE_PACKAGE || pct->package.count != 2) {
		pr_err("Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
		pr_err("Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
		pr_err("Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) ||
	    boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}

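/*
 * acpi_processor_get_performance_states - Evaluate _PSS and build the
 * P-state table.
 * @pr: ACPI processor to query.
 *
 * Each _PSS entry supplies the core frequency (MHz), power, latencies and
 * the control/status values.  Entries whose frequency is invalid or would
 * overflow cpufreq's u32 kHz representation are compacted away.
 */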
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || pss->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d performance states\n",
			  pss->package.count);

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		acpi_handle_debug(pr->handle,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status);

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
		 */
		if (!px->core_frequency ||
		    (u32)(px->core_frequency * 1000) != px->core_frequency * 1000) {
			pr_err(FW_BUG
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		pr_err(FW_BUG
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}

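/*
 * acpi_processor_get_performance_info - Gather all ACPI performance data
 * for a CPU.
 * @pr: ACPI processor to query.
 *
 * Evaluate _PCT, _PSS and (unless disabled) _PPC.  If _PPC exists but the
 * frequency data is missing, warn that the BIOS likely predates the CPU.
 */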
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		acpi_handle_debug(pr->handle,
				  "ACPI-based processor performance control unavailable\n");
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its frequencies.
	 */
update_bios:
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			pr_warn(FW_BUG "BIOS needs update for CPU frequency support\n");
	}
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

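/*
 * acpi_processor_pstate_control - Claim P-state control from the platform
 * via the FADT SMI command port.
 *
 * Returns 1 on success, 0 if the FADT provides no SMI command or
 * pstate_control value, and -EIO if the port write fails.
 */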
int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
		 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
		acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
		acpi_format_exception(status));
	return -EIO;
}

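/*
 * acpi_processor_notify_smm - Hand P-state control to the platform firmware
 * through the FADT SMI interface.
 * @calling_module: cpufreq driver module requesting the handover.
 *
 * SMM is notified at most once.  On success, if _PPC is in use, the
 * reference to @calling_module is retained so that the cpufreq driver
 * cannot be unloaded.
 */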
int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done;
	int result = 0;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurs and to 1 if no error
	 * occurs, but SMM has been notified already.  This avoids repeated
	 * notification, which might lead to unexpected results.
	 */
	if (is_done != 0) {
		if (is_done < 0)
			result = is_done;

		goto out_put;
	}

	result = acpi_processor_pstate_control();
	if (result <= 0) {
		if (result) {
			is_done = result;
		} else {
			pr_debug("No SMI port or pstate_control\n");
			is_done = 1;
		}
		goto out_put;
	}

	is_done = 1;
	/*
	 * Success. If _PPC is in use, unloading the cpufreq driver would be
	 * risky, so disallow it in that case.
	 */
	if (acpi_processor_ppc_in_use)
		return 0;

out_put:
	module_put(calling_module);
	return result;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

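/*
 * acpi_processor_get_psd - Evaluate _PSD and extract the P-state dependency
 * domain information.
 * @handle: ACPI handle of the processor object.
 * @pdomain: Where to store the extracted _PSD package.
 *
 * Validate the package layout, revision and coordination type before
 * reporting success.
 */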
int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]), &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_err("Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_err("Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_err("Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);

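/*
 * acpi_processor_preregister_performance - Discover P-state dependency
 * domains for all CPUs.
 * @performance: Per-CPU performance data to fill in.
 *
 * Evaluate _PSD for every processor, group CPUs sharing a domain into a
 * common shared_cpu_map and propagate the coordination type to all of
 * them.  On any inconsistency, fall back to no coordination for all CPUs.
 */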
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

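/*
 * acpi_processor_register_performance - Attach a cpufreq driver's
 * performance data to a CPU and populate it from ACPI.
 * @performance: Storage for the _PCT/_PSS data, owned by the caller.
 * @cpu: CPU to register.
 *
 * Called by cpufreq drivers (such as acpi-cpufreq) once per CPU; fails if
 * another driver has already registered performance data for @cpu.
 */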
int acpi_processor_register_performance(struct acpi_processor_performance
					*performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);

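/*
 * acpi_processor_unregister_performance - Undo
 * acpi_processor_register_performance().
 * @cpu: CPU to unregister.
 *
 * Free the _PSS state table and detach the performance data from @cpu.
 */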
void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr)
		goto unlock;

	if (pr->performance)
		kfree(pr->performance->states);

	pr->performance = NULL;

unlock:
	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
#endif