/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI v5.1
 * and above specifications.
 */

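/*
 * A minimal usage sketch for the API exported below (illustrative only;
 * error handling omitted and the cpufreq wiring not shown, "cpu" is an
 * assumed logical CPU number):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	cppc_get_perf_caps(cpu, &caps);
 *	ctrls.desired_perf = caps.nominal_perf;
 *	cppc_set_perf(cpu, &ctrls);
 */
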
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	int pcc_subspace_idx;
	bool pcc_channel_acquired;
	ktime_t deadline;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance critical use cases (currently cppc_set_perf):
	 *	We need to take the read_lock and check if the channel belongs
	 * to OSPM before reading or writing to the PCC subspace.
	 *	We need to take the write_lock before transferring the channel
	 * ownership to the platform via a doorbell.
	 *	This allows us to batch a number of CPPC requests if they happen
	 * to originate at about the same time.
	 *
	 * For non-performance critical use cases (init):
	 *	Take the write_lock for all purposes, which gives exclusive
	 * access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
};

/* Structure to represent the single PCC channel */
static struct cppc_pcc_data pcc_data = {
	.pcc_subspace_idx = -1,
	.platform_owns_pcc = true,
};

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static ssize_t show_reference_perf(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			fb_ctrs.reference_perf);
}
define_one_cppc_ro(reference_perf);

static ssize_t show_wraparound_time(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time);
}
define_one_cppc_ro(wraparound_time);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

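/*
 * The attributes above appear under /sys/devices/system/cpu/cpuX/acpi_cppc/
 * once acpi_cppc_processor_probe() registers the per CPU kobject below.
 */

/*
 * check_pcc_chan() - Poll the PCC channel status register until the
 * platform sets the command complete bit or the deadline expires.
 * Returns 0 once OSPM owns the channel, or -EIO on timeout (or, when
 * chk_err_bit is set, if the platform reported a command error).
 */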
static int check_pcc_chan(bool chk_err_bit)
{
	int ret = -EIO, status = 0;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);

	if (!pcc_data.platform_owns_pcc)
		return 0;

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized by
		 * the platform and should have the command completion bit set
		 * when PCC can be used by OSPM.
		 */
		status = readw_relaxed(&generic_comm_base->status);
		if (status & PCC_CMD_COMPLETE_MASK) {
			ret = 0;
			if (chk_err_bit && (status & PCC_ERROR_MASK))
				ret = -EIO;
			break;
		}
		/*
		 * Reduce the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	if (likely(!ret))
		pcc_data.platform_owns_pcc = false;
	else
		pr_err("PCC check channel failed. Status=%x\n", status);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding the write_lock (pcc_lock).
 */
static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO, i;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_data.pending_pcc_write_cmd)
			send_pcc_cmd(CMD_WRITE);

		ret = check_pcc_chan(false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_data.pending_pcc_write_cmd = false;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_data.pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_data.pcc_mrtt > time_delta)
			udelay(pcc_data.pcc_mrtt - time_delta);
	}

	/*
	 * Handle a non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_data.pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit\n");
				ret = -EIO;
				goto end;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_data.pcc_mpar;
		}
		mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_data.platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		goto end;
	}

	/* Wait for completion and check for the PCC error bit. */
	ret = check_pcc_chan(true);

	if (pcc_data.pcc_mrtt)
		last_cmd_cmpl_time = ktime_get();

	if (pcc_data.pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_data.pcc_channel, ret);
	else
		mbox_client_txdone(pcc_data.pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_data.pcc_write_cnt++;
		wake_up_all(&pcc_data.pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

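/*
 * PCC mailbox client. TX completion is signalled by this driver itself
 * (knows_txdone) from send_pcc_cmd() after polling the channel status.
 */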
struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

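/*
 * Evaluate the optional per CPU _PSD package to learn which CPUs share
 * a performance domain and the required coordination type (SW_ALL,
 * SW_ANY or HW_ALL).
 */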
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

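/*
 * Request the PCC mailbox channel for the subspace index found in the
 * _CPC entries, map its shared memory region and cache the PCCT timing
 * parameters (nominal latency, MRTT and MPAR).
 */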
static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_data.pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data.pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data.pcc_mpar = cppc_ss->max_access_rate;
		pcc_data.pcc_nominal = cppc_ss->latency;

		pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data.pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data.pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package() {
 *		17,						// NumEntries
 *		1,						// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9		// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_data.pcc_subspace_idx < 0)
					pcc_data.pcc_subspace_idx = gas_t->access_width;
				else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_data.pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data.pcc_lock);
		init_waitqueue_head(&pcc_data.pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

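/* Read a CPC register, dispatching on its address space type. */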
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
								 *nom_perf;
	u64 high, low, nom;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) {
		regs_in_pcc = 1;
		down_write(&pcc_data.pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nom_perf, &nom);
	perf_caps->nominal_perf = nom;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_data.pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value.
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_data.pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_data.pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

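/*
 * Feedback counter usage sketch (illustrative, not part of this file's
 * API contract): a consumer samples the counters at two points in time,
 * t0 and t1, and derives the average delivered performance over the
 * interval as
 *
 *	perf = reference_perf * (delivered_t1 - delivered_t0) /
 *				(reference_t1 - reference_t0)
 *
 * The exact consumer-side use is up to the cpufreq driver.
 */
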
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I, where we want to write to the CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since the read_lock can be acquired by multiple CPUs simultaneously,
	 * we achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_data.pcc_lock);	/* BEGIN Phase-I */
		if (pcc_data.platform_owns_pcc) {
			ret = check_pcc_chan(false);
			if (ret) {
				up_read(&pcc_data.pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch
		 * to the write lock.
		 */
		pcc_data.pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_data.pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II, where we transfer the ownership of PCC to the
	 * platform.
	 *
	 * Short summary: consider a group of cppc_set_perf requests that
	 * happen in a short overlapping interval. The last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I.
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE:
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have
	 * happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 * down_write, in which case send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So
	 * this avoids triggering an unnecessary doorbell and, more importantly,
	 * before triggering the doorbell it makes sure that the PCC channel
	 * ownership is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a PCC CMD_READ waiting on down_write and it steals the lock
	 * before the PCC CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_data.pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_data.pending_pcc_write_cmd)
				send_pcc_cmd(CMD_WRITE);
			up_write(&pcc_data.pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_data.pcc_write_wait_q,
				cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can convey the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the
	 *               subspace channel can support, reported in commands per
	 *               minute. 0 indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after
	 *               the completion of a command before issuing the next
	 *               command, in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

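	/*
	 * Worked example with assumed numbers: pcc_mpar = 6000 commands per
	 * minute limits OSPM to one command every 60 * (10^9 / 6000) ns,
	 * i.e. roughly 10 ms between performance change requests.
	 */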
	if (pcc_data.pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);

	latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);