// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
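/*
 * A minimal usage sketch of how a cpufreq driver is expected to consume
 * this API (illustrative only; error handling and driver boilerplate are
 * omitted, and the local names caps/ctrls are hypothetical):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		// Request nominal performance; the platform may deliver
 *		// anything it sees fit within the advertised capabilities.
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */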

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (currently cppc_set_perf), we
	 * need to take the read_lock and check if the channel belongs to
	 * OSPM before reading or writing to the PCC subspace. We need to
	 * take the write_lock before transferring channel ownership to the
	 * platform via a doorbell. This allows us to batch a number of CPPC
	 * requests if they happen to originate at about the same time.
	 *
	 * For non-performance-critical usecases (init), take the write_lock
	 * for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))
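/*
 * Note: the 0x8 above skips the generic PCC subspace shared-memory header
 * (the 4-byte signature plus 16-bit command and status fields of
 * struct acpi_pcct_shared_memory), so (offs) is relative to the start of
 * the communication space that follows it.
 */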

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)
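
/*
 * For example, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) below expands to a show_highest_perf() sysfs show routine
 * that fills a struct cppc_perf_caps via cppc_get_perf_caps() and prints
 * its highest_perf member, plus the matching read-only "highest_perf"
 * attribute.
 */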
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
161
162static ssize_t show_feedback_ctrs(struct kobject *kobj,
163 struct attribute *attr, char *buf)
164{
165 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
166 struct cppc_perf_fb_ctrs fb_ctrs = {0};
167 int ret;
168
169 ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
170 if (ret)
171 return ret;
172
173 return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
174 fb_ctrs.reference, fb_ctrs.delivered);
175}
176define_one_cppc_ro(feedback_ctrs);
177
178static struct attribute *cppc_attrs[] = {
179 &feedback_ctrs.attr,
180 &reference_perf.attr,
181 &wraparound_time.attr,
182 &highest_perf.attr,
183 &lowest_perf.attr,
184 &lowest_nonlinear_perf.attr,
185 &nominal_perf.attr,
186 &nominal_freq.attr,
187 &lowest_freq.attr,
188 NULL
189};
190
191static struct kobj_type cppc_ktype = {
192 .sysfs_ops = &kobj_sysfs_ops,
193 .default_attrs = cppc_attrs,
194};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command complete bit is
	 * set (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding the write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
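	/*
	 * Worked example (hypothetical numbers): with pcc_mpar = 120 the
	 * platform allows 120 commands per minute, so mpar_count starts at
	 * 120 and is decremented on every send; once it hits zero while the
	 * current 60s window is still open, further requests fail with -EIO
	 * until the window expires and the counter is reset.
	 */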
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for the PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
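		/*
		 * For example (hypothetical PCCT values): a nominal
		 * latency of 100us with NUM_RETRIES = 500 yields a
		 * deadline_us of 50,000us, i.e. a 50ms polling budget
		 * in check_pcc_chan().
		 */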
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC subspace index for which to allocate the data
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

/*
 * An example CPC table looks like the following.
 *
 * Name (_CPC, Package() {
 *	17,							// NumEntries
 *	1,							// Revision
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *	ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *	..
 *	..
 *	..
 * })
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 * Register (
 *	PCC,	// AddressSpaceKeyword
 *	8,	// RegisterBitWidth
 *	8,	// RegisterBitOffset
 *	0x30,	// RegisterAddress
 *	9	// AccessSize (subspace ID)
 * )
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or a negative error code otherwise
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or a negative error code otherwise
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock,
 * they should be as fast as possible. We have already mapped the PCC
 * subspace during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_desired_perf - Get the value of desired performance register.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: address of a variable to store the returned desired performance
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cpc_register_resource *desired_reg;
	struct cppc_pcc_data *pcc_ss_data = NULL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	if (CPC_IN_PCC(desired_reg)) {
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, desired_reg, desired_perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, desired_reg, desired_perf);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value.
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the optional ctr_wrap_time register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I, where we want to write to the CPC registers.
	 * -> We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since the read_lock can be acquired by multiple CPUs simultaneously,
	 * we achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch
		 * to the write lock.
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II, where we transfer the ownership of PCC to the
	 * platform.
	 *
	 * Short summary: think of a group of cppc_set_perf requests that
	 * happened in a short overlapping interval. The last CPU to come
	 * out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 * down_write, in which case send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So
	 * this avoids triggering an unnecessary doorbell and, more importantly,
	 * before triggering the doorbell it makes sure that the PCC channel
	 * ownership is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a PCC CMD_READ waiting on down_write and it steals the lock
	 * before the PCC CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command.
	 */
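	/*
	 * Illustrative timeline (hypothetical CPUs): CPU0 and CPU1 both
	 * write their desired_perf in Phase-I under the read_lock. CPU1
	 * drops the read_lock last, wins down_write_trylock(), and rings
	 * the doorbell once on behalf of both requests; CPU0's trylock
	 * fails, so it simply waits on pcc_write_wait_q until
	 * pcc_write_cnt advances past its write_cmd_id.
	 */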
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
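	/*
	 * Worked example (hypothetical PCCT values): pcc_mpar = 6000
	 * commands/min gives 60 * (1e9 / 6000) = 10,000,000 ns (10 ms);
	 * this is then raised to pcc_nominal or pcc_mrtt (each converted
	 * from us to ns) if either of those is larger.
	 */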
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
1/*
2 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
3 *
4 * (C) Copyright 2014, 2015 Linaro Ltd.
5 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 *
12 * CPPC describes a few methods for controlling CPU performance using
13 * information from a per CPU table called CPC. This table is described in
14 * the ACPI v5.0+ specification. The table consists of a list of
15 * registers which may be memory mapped or hardware registers and also may
16 * include some static integer values.
17 *
18 * CPU performance is on an abstract continuous scale as against a discretized
19 * P-state scale which is tied to CPU frequency only. In brief, the basic
20 * operation involves:
21 *
22 * - OS makes a CPU performance request. (Can provide min and max bounds)
23 *
24 * - Platform (such as BMC) is free to optimize request within requested bounds
25 * depending on power/thermal budgets etc.
26 *
27 * - Platform conveys its decision back to OS
28 *
29 * The communication between OS and platform occurs through another medium
30 * called (PCC) Platform Communication Channel. This is a generic mailbox like
31 * mechanism which includes doorbell semantics to indicate register updates.
32 * See drivers/mailbox/pcc.c for details on PCC.
33 *
34 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
35 * above specifications.
36 */
37
38#define pr_fmt(fmt) "ACPI CPPC: " fmt
39
40#include <linux/cpufreq.h>
41#include <linux/delay.h>
42#include <linux/ktime.h>
43#include <linux/rwsem.h>
44#include <linux/wait.h>
45
46#include <acpi/cppc_acpi.h>
47
48struct cppc_pcc_data {
49 struct mbox_chan *pcc_channel;
50 void __iomem *pcc_comm_addr;
51 int pcc_subspace_idx;
52 bool pcc_channel_acquired;
53 ktime_t deadline;
54 unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
55
56 bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
57 bool platform_owns_pcc; /* Ownership of PCC subspace */
58 unsigned int pcc_write_cnt; /* Running count of PCC write commands */
59
60 /*
61 * Lock to provide controlled access to the PCC channel.
62 *
63 * For performance critical usecases(currently cppc_set_perf)
64 * We need to take read_lock and check if channel belongs to OSPM
65 * before reading or writing to PCC subspace
66 * We need to take write_lock before transferring the channel
67 * ownership to the platform via a Doorbell
68 * This allows us to batch a number of CPPC requests if they happen
69 * to originate in about the same time
70 *
71 * For non-performance critical usecases(init)
72 * Take write_lock for all purposes which gives exclusive access
73 */
74 struct rw_semaphore pcc_lock;
75
76 /* Wait queue for CPUs whose requests were batched */
77 wait_queue_head_t pcc_write_wait_q;
78};
79
80/* Structure to represent the single PCC channel */
81static struct cppc_pcc_data pcc_data = {
82 .pcc_subspace_idx = -1,
83 .platform_owns_pcc = true,
84};
85
86/*
87 * The cpc_desc structure contains the ACPI register details
88 * as described in the per CPU _CPC tables. The details
89 * include the type of register (e.g. PCC, System IO, FFH etc.)
90 * and destination addresses which lets us READ/WRITE CPU performance
91 * information using the appropriate I/O methods.
92 */
93static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
94
95/* pcc mapped address + header size + offset within PCC subspace */
96#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
97
98/* Check if a CPC regsiter is in PCC */
99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
100 (cpc)->cpc_entry.reg.space_id == \
101 ACPI_ADR_SPACE_PLATFORM_COMM)
102
103/* Evalutes to True if reg is a NULL register descriptor */
104#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
105 (reg)->address == 0 && \
106 (reg)->bit_width == 0 && \
107 (reg)->bit_offset == 0 && \
108 (reg)->access_width == 0)
109
110/* Evalutes to True if an optional cpc field is supported */
111#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
112 !!(cpc)->cpc_entry.int_value : \
113 !IS_NULL_REG(&(cpc)->cpc_entry.reg))
114/*
115 * Arbitrary Retries in case the remote processor is slow to respond
116 * to PCC commands. Keeping it high enough to cover emulators where
117 * the processors run painfully slow.
118 */
119#define NUM_RETRIES 500
120
121struct cppc_attr {
122 struct attribute attr;
123 ssize_t (*show)(struct kobject *kobj,
124 struct attribute *attr, char *buf);
125 ssize_t (*store)(struct kobject *kobj,
126 struct attribute *attr, const char *c, ssize_t count);
127};
128
129#define define_one_cppc_ro(_name) \
130static struct cppc_attr _name = \
131__ATTR(_name, 0444, show_##_name, NULL)
132
133#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
134
135static ssize_t show_feedback_ctrs(struct kobject *kobj,
136 struct attribute *attr, char *buf)
137{
138 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
139 struct cppc_perf_fb_ctrs fb_ctrs = {0};
140
141 cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
142
143 return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
144 fb_ctrs.reference, fb_ctrs.delivered);
145}
146define_one_cppc_ro(feedback_ctrs);
147
148static ssize_t show_reference_perf(struct kobject *kobj,
149 struct attribute *attr, char *buf)
150{
151 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
152 struct cppc_perf_fb_ctrs fb_ctrs = {0};
153
154 cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
155
156 return scnprintf(buf, PAGE_SIZE, "%llu\n",
157 fb_ctrs.reference_perf);
158}
159define_one_cppc_ro(reference_perf);
160
161static ssize_t show_wraparound_time(struct kobject *kobj,
162 struct attribute *attr, char *buf)
163{
164 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
165 struct cppc_perf_fb_ctrs fb_ctrs = {0};
166
167 cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
168
169 return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time);
170
171}
172define_one_cppc_ro(wraparound_time);
173
174static struct attribute *cppc_attrs[] = {
175 &feedback_ctrs.attr,
176 &reference_perf.attr,
177 &wraparound_time.attr,
178 NULL
179};
180
181static struct kobj_type cppc_ktype = {
182 .sysfs_ops = &kobj_sysfs_ops,
183 .default_attrs = cppc_attrs,
184};
185
186static int check_pcc_chan(bool chk_err_bit)
187{
188 int ret = -EIO, status = 0;
189 struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
190 ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);
191
192 if (!pcc_data.platform_owns_pcc)
193 return 0;
194
195 /* Retry in case the remote processor was too slow to catch up. */
196 while (!ktime_after(ktime_get(), next_deadline)) {
197 /*
198 * Per spec, prior to boot the PCC space wil be initialized by
199 * platform and should have set the command completion bit when
200 * PCC can be used by OSPM
201 */
202 status = readw_relaxed(&generic_comm_base->status);
203 if (status & PCC_CMD_COMPLETE_MASK) {
204 ret = 0;
205 if (chk_err_bit && (status & PCC_ERROR_MASK))
206 ret = -EIO;
207 break;
208 }
209 /*
210 * Reducing the bus traffic in case this loop takes longer than
211 * a few retries.
212 */
213 udelay(3);
214 }
215
216 if (likely(!ret))
217 pcc_data.platform_owns_pcc = false;
218 else
219 pr_err("PCC check channel failed. Status=%x\n", status);
220
221 return ret;
222}
223
224/*
225 * This function transfers the ownership of the PCC to the platform
226 * So it must be called while holding write_lock(pcc_lock)
227 */
228static int send_pcc_cmd(u16 cmd)
229{
230 int ret = -EIO, i;
231 struct acpi_pcct_shared_memory *generic_comm_base =
232 (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
233 static ktime_t last_cmd_cmpl_time, last_mpar_reset;
234 static int mpar_count;
235 unsigned int time_delta;
236
237 /*
238 * For CMD_WRITE we know for a fact the caller should have checked
239 * the channel before writing to PCC space
240 */
241 if (cmd == CMD_READ) {
242 /*
243 * If there are pending cpc_writes, then we stole the channel
244 * before write completion, so first send a WRITE command to
245 * platform
246 */
247 if (pcc_data.pending_pcc_write_cmd)
248 send_pcc_cmd(CMD_WRITE);
249
250 ret = check_pcc_chan(false);
251 if (ret)
252 goto end;
253 } else /* CMD_WRITE */
254 pcc_data.pending_pcc_write_cmd = FALSE;
255
256 /*
257 * Handle the Minimum Request Turnaround Time(MRTT)
258 * "The minimum amount of time that OSPM must wait after the completion
259 * of a command before issuing the next command, in microseconds"
260 */
261 if (pcc_data.pcc_mrtt) {
262 time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
263 if (pcc_data.pcc_mrtt > time_delta)
264 udelay(pcc_data.pcc_mrtt - time_delta);
265 }
266
267 /*
268 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
269 * "The maximum number of periodic requests that the subspace channel can
270 * support, reported in commands per minute. 0 indicates no limitation."
271 *
272 * This parameter should be ideally zero or large enough so that it can
273 * handle maximum number of requests that all the cores in the system can
274 * collectively generate. If it is not, we will follow the spec and just
275 * not send the request to the platform after hitting the MPAR limit in
276 * any 60s window
277 */
278 if (pcc_data.pcc_mpar) {
279 if (mpar_count == 0) {
280 time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
281 if (time_delta < 60 * MSEC_PER_SEC) {
282 pr_debug("PCC cmd not sent due to MPAR limit");
283 ret = -EIO;
284 goto end;
285 }
286 last_mpar_reset = ktime_get();
287 mpar_count = pcc_data.pcc_mpar;
288 }
289 mpar_count--;
290 }
291
292 /* Write to the shared comm region. */
293 writew_relaxed(cmd, &generic_comm_base->command);
294
295 /* Flip CMD COMPLETE bit */
296 writew_relaxed(0, &generic_comm_base->status);
297
298 pcc_data.platform_owns_pcc = true;
299
300 /* Ring doorbell */
301 ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
302 if (ret < 0) {
303 pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
304 cmd, ret);
305 goto end;
306 }
307
308 /* wait for completion and check for PCC errro bit */
309 ret = check_pcc_chan(true);
310
311 if (pcc_data.pcc_mrtt)
312 last_cmd_cmpl_time = ktime_get();
313
314 if (pcc_data.pcc_channel->mbox->txdone_irq)
315 mbox_chan_txdone(pcc_data.pcc_channel, ret);
316 else
317 mbox_client_txdone(pcc_data.pcc_channel, ret);
318
319end:
320 if (cmd == CMD_WRITE) {
321 if (unlikely(ret)) {
322 for_each_possible_cpu(i) {
323 struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
324 if (!desc)
325 continue;
326
327 if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
328 desc->write_cmd_status = ret;
329 }
330 }
331 pcc_data.pcc_write_cnt++;
332 wake_up_all(&pcc_data.pcc_write_wait_q);
333 }
334
335 return ret;
336}
337
338static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
339{
340 if (ret < 0)
341 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
342 *(u16 *)msg, ret);
343 else
344 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
345 *(u16 *)msg, ret);
346}
347
348struct mbox_client cppc_mbox_cl = {
349 .tx_done = cppc_chan_tx_done,
350 .knows_txdone = true,
351};
352
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
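
/*
 * For example, on a hypothetical four-CPU system where firmware reports two
 * SW_ANY _PSD domains {0,1} and {2,3}, the loops above leave CPUs 0 and 1
 * with identical shared_cpu_maps containing both CPUs (and likewise for
 * CPUs 2 and 3), each with shared_type = CPUFREQ_SHARED_TYPE_ANY.
 */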

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
								pcc_subspace_idx);

		if (IS_ERR(pcc_data.pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data.pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data.pcc_mpar = cppc_ss->max_access_rate;
		pcc_data.pcc_nominal = cppc_ss->latency;

		pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address,
							 cppc_ss->length);
		if (!pcc_data.pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data.pcc_channel_acquired = true;
	}

	return 0;
}

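/*
 * As an illustration: with a PCCT Nominal latency of 100 us, the deadline
 * computed above is NUM_RETRIES * 100 us (NUM_RETRIES is defined earlier in
 * this file), after which check_pcc_chan() gives up waiting for the platform
 * to flip the CMD COMPLETE bit.
 */
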
/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *	{
 *		17,					// NumEntries
 *		1,					// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
			 num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
			 cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
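		/*
		 * Each remaining entry is either a plain integer value or a
		 * buffer wrapping a GAS-like register descriptor (PCC,
		 * SystemMemory or FFH).
		 */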
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_data.pcc_subspace_idx < 0)
					pcc_data.pcc_subspace_idx = gas_t->access_width;
				else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_data.pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data.pcc_lock);
		init_waitqueue_head(&pcc_data.pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum: CPU number to read
 * @reg: CPPC register information
 * @val: placeholder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or a negative error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum: CPU number to write
 * @reg: CPPC register information
 * @val: value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or a negative error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

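/*
 * cpc_read_ffh(), cpc_write_ffh() and cpc_ffh_supported() are __weak so an
 * architecture can override them with an implementation that knows how to
 * reach its fixed hardware. A minimal, purely illustrative sketch (the
 * read_cpu_reg()/write_cpu_reg() helpers are hypothetical, not real APIs):
 *
 *	bool cpc_ffh_supported(void)
 *	{
 *		return true;
 *	}
 *
 *	int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 *	{
 *		// Fetch the register selected by reg->address on CPU cpunum.
 *		*val = read_cpu_reg(cpunum, reg->address);
 *		return 0;
 *	}
 *
 *	int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 *	{
 *		write_cpu_reg(cpunum, reg->address, val);
 *		return 0;
 *	}
 */
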
/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
			 reg->bit_width);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
			 reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
				     *nom_perf;
	u64 high, low, nom;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) {
		regs_in_pcc = 1;
		down_write(&pcc_data.pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nom_perf, &nom);
	perf_caps->nominal_perf = nom;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_data.pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
				     *ref_perf_reg, *ctr_wrap_reg;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_data.pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the optional ctr_wrap_time register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_data.pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

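/*
 * A consumer (e.g. a cpufreq driver) would typically sample the feedback
 * counters twice and derive delivered performance from the deltas. A rough
 * sketch, ignoring counter wraparound:
 *
 *	struct cppc_perf_fb_ctrs fb_t0, fb_t1;
 *	u64 delta_delivered, delta_reference, delivered_perf;
 *
 *	cppc_get_perf_ctrs(cpu, &fb_t0);
 *	udelay(2);	// let the counters advance
 *	cppc_get_perf_ctrs(cpu, &fb_t1);
 *
 *	delta_delivered = fb_t1.delivered - fb_t0.delivered;
 *	delta_reference = fb_t1.reference - fb_t0.reference;
 *	// Delivered perf scales the known reference performance level
 *	// by the ratio of the two counter deltas.
 *	if (delta_reference)
 *		delivered_perf = (fb_t0.reference_perf * delta_delivered) /
 *				 delta_reference;
 */
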
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_data.pcc_lock);	/* BEGIN Phase-I */
		if (pcc_data.platform_owns_pcc) {
			ret = check_pcc_chan(false);
			if (ret) {
				up_read(&pcc_data.pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch
		 * to the write lock
		 */
		pcc_data.pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_data.pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: if a group of cppc_set_perf requests arrives in a
	 * short overlapping interval, the last CPU to come out of Phase-I
	 * will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 * 1. We want to execute Phase-II only when there are no CPUs
	 *    currently executing in Phase-I
	 * 2. Once we start Phase-II we want to prevent all other CPUs from
	 *    entering Phase-I
	 * 3. We want only one CPU among all those who went through Phase-I
	 *    to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be
	 * TRUE:
	 * 1. There is at least one CPU in Phase-I which will later execute
	 *    write_trylock, so the CPUs in Phase-I will be responsible for
	 *    executing the Phase-II.
	 * 2. Some other CPU has beaten this CPU to successfully execute the
	 *    write_trylock and has already acquired the write_lock. We know
	 *    for a fact it (the other CPU acquiring the write_lock) couldn't
	 *    have happened before this CPU's Phase-I as we held the read_lock.
	 * 3. Some other CPU executing a pcc CMD_READ has stolen the
	 *    down_write, in which case, send_pcc_cmd will check for pending
	 *    CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So
	 * this avoids triggering an unnecessary doorbell and, more
	 * importantly, before triggering the doorbell it makes sure that the
	 * PCC channel ownership is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the
	 * lock before the pcc CMD_WRITE is completed. send_pcc_cmd checks
	 * for this case during a CMD_READ and if there are pending writes
	 * it delivers the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_data.pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_data.pending_pcc_write_cmd)
				send_pcc_cmd(CMD_WRITE);
			up_write(&pcc_data.pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_data.pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

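/*
 * A minimal usage sketch from a cpufreq-style caller (target_perf is an
 * arbitrary value in abstract performance units, shown only for
 * illustration):
 *
 *	struct cppc_perf_ctrls ctrls = {
 *		.desired_perf = target_perf,
 *	};
 *
 *	if (cppc_set_perf(cpu, &ctrls))
 *		pr_debug("Failed to set perf on CPU%d\n", cpu);
 */
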
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can convey the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the
	 *               subspace channel can support, reported in commands
	 *               per minute. 0 indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after
	 *               the completion of a command before issuing the next
	 *               command, in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_data.pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);

	latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
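
/*
 * For example, an MPAR of 600 commands per minute limits requests to one
 * every 100 ms, so the expression above yields 60 * (10^9 / 600) =
 * 100,000,000 ns. If the PCCT also reported pcc_nominal = 1000 us and
 * pcc_mrtt = 60 us, the max() checks keep the larger 100 ms value.
 */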