v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
   4 *
   5 * (C) Copyright 2014, 2015 Linaro Ltd.
   6 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
   7 *
   8 * CPPC describes a few methods for controlling CPU performance using
   9 * information from a per CPU table called CPC. This table is described in
  10 * the ACPI v5.0+ specification. The table consists of a list of
  11 * registers which may be memory mapped or hardware registers and also may
  12 * include some static integer values.
  13 *
  14 * CPU performance is on an abstract continuous scale as against a discretized
  15 * P-state scale which is tied to CPU frequency only. In brief, the basic
  16 * operation involves:
  17 *
  18 * - OS makes a CPU performance request. (Can provide min and max bounds)
  19 *
  20 * - Platform (such as BMC) is free to optimize request within requested bounds
  21 *   depending on power/thermal budgets etc.
  22 *
  23 * - Platform conveys its decision back to OS
  24 *
  25 * The communication between OS and platform occurs through another medium
  26 * called (PCC) Platform Communication Channel. This is a generic mailbox like
  27 * mechanism which includes doorbell semantics to indicate register updates.
  28 * See drivers/mailbox/pcc.c for details on PCC.
  29 *
  30 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  31 * above specifications.
  32 */
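/*
 * Editorial sketch (not part of the original file), using a hypothetical
 * caller name: a minimal consumer of the API exported below, driving the
 * request flow just described. Error handling is abbreviated.
 *
 *	static int example_request_nominal(int cpu)
 *	{
 *		struct cppc_perf_caps caps = {0};
 *		struct cppc_perf_ctrls ctrls = {0};
 *
 *		// Discover this CPU's abstract performance scale.
 *		if (cppc_get_perf_caps(cpu, &caps))
 *			return -ENODEV;
 *
 *		// Request nominal performance; the platform may adjust the
 *		// request within its power/thermal budgets and reply via PCC.
 *		ctrls.desired_perf = caps.nominal_perf;
 *		return cppc_set_perf(cpu, &ctrls);
 *	}
 */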
  33
  34#define pr_fmt(fmt)	"ACPI CPPC: " fmt
  35
  36#include <linux/cpufreq.h>
  37#include <linux/delay.h>
  38#include <linux/iopoll.h>
  39#include <linux/ktime.h>
  40#include <linux/rwsem.h>
  41#include <linux/wait.h>
  42
  43#include <acpi/cppc_acpi.h>
  44
  45struct cppc_pcc_data {
  46	struct mbox_chan *pcc_channel;
  47	void __iomem *pcc_comm_addr;
  48	bool pcc_channel_acquired;
  49	unsigned int deadline_us;
  50	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  51
  52	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
  53	bool platform_owns_pcc;		/* Ownership of PCC subspace */
  54	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */
  55
  56	/*
  57	 * Lock to provide controlled access to the PCC channel.
  58	 *
  59	 * For performance critical usecases(currently cppc_set_perf)
  60	 *	We need to take read_lock and check if channel belongs to OSPM
  61	 * before reading or writing to PCC subspace
  62	 *	We need to take write_lock before transferring the channel
  63	 * ownership to the platform via a Doorbell
  64	 *	This allows us to batch a number of CPPC requests if they happen
  65	 * to originate in about the same time
  66	 *
  67	 * For non-performance critical usecases(init)
  68	 *	Take write_lock for all purposes which gives exclusive access
  69	 */
  70	struct rw_semaphore pcc_lock;
  71
  72	/* Wait queue for CPUs whose requests were batched */
  73	wait_queue_head_t pcc_write_wait_q;
  74	ktime_t last_cmd_cmpl_time;
  75	ktime_t last_mpar_reset;
  76	int mpar_count;
  77	int refcount;
  78};
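/*
 * Editorial sketch of the locking protocol described above; the real
 * implementation is cppc_set_perf() further down in this file.
 *
 *	down_read(&pcc_ss_data->pcc_lock);	// many CPUs in parallel
 *	... verify OSPM owns the channel, write to the PCC subspace ...
 *	up_read(&pcc_ss_data->pcc_lock);
 *
 *	if (down_write_trylock(&pcc_ss_data->pcc_lock)) {
 *		... ring the doorbell once for all batched writes ...
 *		up_write(&pcc_ss_data->pcc_lock);
 *	}
 */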
  79
  80/* Array to represent the PCC channel per subspace ID */
  81static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  82/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
  83static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  84
  85/*
  86 * The cpc_desc structure contains the ACPI register details
  87 * as described in the per CPU _CPC tables. The details
  88 * include the type of register (e.g. PCC, System IO, FFH etc.)
  89 * and destination addresses which lets us READ/WRITE CPU performance
  90 * information using the appropriate I/O methods.
  91 */
  92static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  93
  94/* pcc mapped address + header size + offset within PCC subspace */
  95#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  96						0x8 + (offs))
  97
  98/* Check if a CPC register is in PCC */
  99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
 100				(cpc)->cpc_entry.reg.space_id ==	\
 101				ACPI_ADR_SPACE_PLATFORM_COMM)
 102
  103/* Evaluates to True if reg is a NULL register descriptor */
 104#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
 105				(reg)->address == 0 &&			\
 106				(reg)->bit_width == 0 &&		\
 107				(reg)->bit_offset == 0 &&		\
 108				(reg)->access_width == 0)
 109
  110/* Evaluates to True if an optional cpc field is supported */
 111#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
 112				!!(cpc)->cpc_entry.int_value :		\
 113				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
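/*
 * Editorial note: firmware that omits an optional entry such as Lowest
 * Frequency typically encodes it either as integer 0 or as the NULL
 * register descriptor Register(SystemMemory, 0, 0, 0, 0); both make
 * CPC_SUPPORTED() evaluate to false.
 */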
 114/*
 115 * Arbitrary Retries in case the remote processor is slow to respond
 116 * to PCC commands. Keeping it high enough to cover emulators where
 117 * the processors run painfully slow.
 118 */
 119#define NUM_RETRIES 500ULL
 120
 121struct cppc_attr {
 122	struct attribute attr;
 123	ssize_t (*show)(struct kobject *kobj,
 124			struct attribute *attr, char *buf);
 125	ssize_t (*store)(struct kobject *kobj,
 126			struct attribute *attr, const char *c, ssize_t count);
 127};
 128
 129#define define_one_cppc_ro(_name)		\
 130static struct cppc_attr _name =			\
 131__ATTR(_name, 0444, show_##_name, NULL)
 132
 133#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
 134
 135#define show_cppc_data(access_fn, struct_name, member_name)		\
 136	static ssize_t show_##member_name(struct kobject *kobj,		\
 137					struct attribute *attr,	char *buf) \
 138	{								\
 139		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
 140		struct struct_name st_name = {0};			\
 141		int ret;						\
 142									\
 143		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
 144		if (ret)						\
 145			return ret;					\
 146									\
 147		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
 148				(u64)st_name.member_name);		\
 149	}								\
 150	define_one_cppc_ro(member_name)
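/*
 * Editorial note: for example, show_cppc_data(cppc_get_perf_caps,
 * cppc_perf_caps, highest_perf) below expands to roughly:
 *
 *	static ssize_t show_highest_perf(struct kobject *kobj,
 *					 struct attribute *attr, char *buf)
 *	{
 *		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 *		struct cppc_perf_caps st_name = {0};
 *		int ret = cppc_get_perf_caps(cpc_ptr->cpu_id, &st_name);
 *
 *		if (ret)
 *			return ret;
 *		return scnprintf(buf, PAGE_SIZE, "%llu\n",
 *				 (u64)st_name.highest_perf);
 *	}
 *	static struct cppc_attr highest_perf =
 *		__ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */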
 151
 152show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 153show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 154show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 155show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
 156show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
 157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
 158
 159show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 160show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 161
 162static ssize_t show_feedback_ctrs(struct kobject *kobj,
 163		struct attribute *attr, char *buf)
 164{
 165	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 166	struct cppc_perf_fb_ctrs fb_ctrs = {0};
 167	int ret;
 168
 169	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
 170	if (ret)
 171		return ret;
 172
 173	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
 174			fb_ctrs.reference, fb_ctrs.delivered);
 175}
 176define_one_cppc_ro(feedback_ctrs);
 177
 178static struct attribute *cppc_attrs[] = {
 179	&feedback_ctrs.attr,
 180	&reference_perf.attr,
 181	&wraparound_time.attr,
 182	&highest_perf.attr,
 183	&lowest_perf.attr,
 184	&lowest_nonlinear_perf.attr,
 185	&nominal_perf.attr,
 186	&nominal_freq.attr,
 187	&lowest_freq.attr,
 188	NULL
 189};
 190
 191static struct kobj_type cppc_ktype = {
 192	.sysfs_ops = &kobj_sysfs_ops,
 193	.default_attrs = cppc_attrs,
 194};
 195
 196static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 197{
 198	int ret, status;
 199	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 200	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 201		pcc_ss_data->pcc_comm_addr;
 202
 203	if (!pcc_ss_data->platform_owns_pcc)
 204		return 0;
 205
 206	/*
 207	 * Poll PCC status register every 3us(delay_us) for maximum of
 208	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
 209	 */
 210	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
 211					status & PCC_CMD_COMPLETE_MASK, 3,
 212					pcc_ss_data->deadline_us);
 213
 214	if (likely(!ret)) {
 215		pcc_ss_data->platform_owns_pcc = false;
 216		if (chk_err_bit && (status & PCC_ERROR_MASK))
 217			ret = -EIO;
 218	}
 219
 220	if (unlikely(ret))
 221		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
 222		       pcc_ss_id, ret);
 223
 224	return ret;
 225}
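/*
 * Editorial sketch: the readw_relaxed_poll_timeout() call above is
 * approximately equivalent to this open-coded loop (see
 * include/linux/iopoll.h for the exact definition):
 *
 *	ktime_t timeout = ktime_add_us(ktime_get(),
 *				       pcc_ss_data->deadline_us);
 *
 *	for (;;) {
 *		status = readw_relaxed(&generic_comm_base->status);
 *		if (status & PCC_CMD_COMPLETE_MASK)
 *			break;			// platform is done
 *		if (ktime_compare(ktime_get(), timeout) > 0) {
 *			ret = -ETIMEDOUT;	// deadline_us expired
 *			break;
 *		}
 *		usleep_range(1, 3);		// back off ~3us per poll
 *	}
 */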
 226
 227/*
  228 * This function transfers the ownership of the PCC to the platform,
  229 * so it must be called while holding write_lock(pcc_lock).
 230 */
 231static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 232{
 233	int ret = -EIO, i;
 234	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 235	struct acpi_pcct_shared_memory *generic_comm_base =
 236		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
 237	unsigned int time_delta;
 238
 239	/*
 240	 * For CMD_WRITE we know for a fact the caller should have checked
 241	 * the channel before writing to PCC space
 242	 */
 243	if (cmd == CMD_READ) {
 244		/*
 245		 * If there are pending cpc_writes, then we stole the channel
 246		 * before write completion, so first send a WRITE command to
 247		 * platform
 248		 */
 249		if (pcc_ss_data->pending_pcc_write_cmd)
 250			send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 251
 252		ret = check_pcc_chan(pcc_ss_id, false);
 253		if (ret)
 254			goto end;
 255	} else /* CMD_WRITE */
 256		pcc_ss_data->pending_pcc_write_cmd = FALSE;
 257
 258	/*
 259	 * Handle the Minimum Request Turnaround Time(MRTT)
 260	 * "The minimum amount of time that OSPM must wait after the completion
 261	 * of a command before issuing the next command, in microseconds"
 262	 */
 263	if (pcc_ss_data->pcc_mrtt) {
 264		time_delta = ktime_us_delta(ktime_get(),
 265					    pcc_ss_data->last_cmd_cmpl_time);
 266		if (pcc_ss_data->pcc_mrtt > time_delta)
 267			udelay(pcc_ss_data->pcc_mrtt - time_delta);
 268	}
 269
 270	/*
 271	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
 272	 * "The maximum number of periodic requests that the subspace channel can
 273	 * support, reported in commands per minute. 0 indicates no limitation."
 274	 *
 275	 * This parameter should be ideally zero or large enough so that it can
 276	 * handle maximum number of requests that all the cores in the system can
 277	 * collectively generate. If it is not, we will follow the spec and just
 278	 * not send the request to the platform after hitting the MPAR limit in
 279	 * any 60s window
 280	 */
 281	if (pcc_ss_data->pcc_mpar) {
 282		if (pcc_ss_data->mpar_count == 0) {
 283			time_delta = ktime_ms_delta(ktime_get(),
 284						    pcc_ss_data->last_mpar_reset);
 285			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 286				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
 287					 pcc_ss_id);
 288				ret = -EIO;
 289				goto end;
 290			}
 291			pcc_ss_data->last_mpar_reset = ktime_get();
 292			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 293		}
 294		pcc_ss_data->mpar_count--;
 295	}
 296
 297	/* Write to the shared comm region. */
 298	writew_relaxed(cmd, &generic_comm_base->command);
 299
 300	/* Flip CMD COMPLETE bit */
 301	writew_relaxed(0, &generic_comm_base->status);
 302
 303	pcc_ss_data->platform_owns_pcc = true;
 304
 305	/* Ring doorbell */
 306	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
 307	if (ret < 0) {
 308		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
 309		       pcc_ss_id, cmd, ret);
 310		goto end;
 311	}
 312
  313	/* wait for completion and check for PCC error bit */
 314	ret = check_pcc_chan(pcc_ss_id, true);
 315
 316	if (pcc_ss_data->pcc_mrtt)
 317		pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 318
 319	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
 320		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
 321	else
 322		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 323
 324end:
 325	if (cmd == CMD_WRITE) {
 326		if (unlikely(ret)) {
 327			for_each_possible_cpu(i) {
 328				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
 329				if (!desc)
 330					continue;
 331
 332				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 333					desc->write_cmd_status = ret;
 334			}
 335		}
 336		pcc_ss_data->pcc_write_cnt++;
 337		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 338	}
 339
 340	return ret;
 341}
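/*
 * Editorial example of the MPAR throttle above: with pcc_mpar == 120
 * (120 commands per minute), mpar_count is recharged to 120 at most
 * once per 60s window; a 121st command within the same window is
 * dropped with -EIO rather than forwarded to the platform, as the
 * spec requires.
 */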
 342
 343static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 344{
 345	if (ret < 0)
 346		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 347				*(u16 *)msg, ret);
 348	else
 349		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
 350				*(u16 *)msg, ret);
 351}
 352
 353struct mbox_client cppc_mbox_cl = {
 354	.tx_done = cppc_chan_tx_done,
 355	.knows_txdone = true,
 356};
 357
 358static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 359{
 360	int result = -EFAULT;
 361	acpi_status status = AE_OK;
 362	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 363	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
 364	struct acpi_buffer state = {0, NULL};
 365	union acpi_object  *psd = NULL;
 366	struct acpi_psd_package *pdomain;
 367
 368	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
 369					    &buffer, ACPI_TYPE_PACKAGE);
 370	if (status == AE_NOT_FOUND)	/* _PSD is optional */
 371		return 0;
 372	if (ACPI_FAILURE(status))
 373		return -ENODEV;
 374
 375	psd = buffer.pointer;
 376	if (!psd || psd->package.count != 1) {
 377		pr_debug("Invalid _PSD data\n");
 378		goto end;
 379	}
 380
 381	pdomain = &(cpc_ptr->domain_info);
 382
 383	state.length = sizeof(struct acpi_psd_package);
 384	state.pointer = pdomain;
 385
 386	status = acpi_extract_package(&(psd->package.elements[0]),
 387		&format, &state);
 388	if (ACPI_FAILURE(status)) {
 389		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
 390		goto end;
 391	}
 392
 393	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
 394		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
 395		goto end;
 396	}
 397
 398	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
 399		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
 400		goto end;
 401	}
 402
 403	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 404	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 405	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 406		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
 407		goto end;
 408	}
 409
 410	result = 0;
 411end:
 412	kfree(buffer.pointer);
 413	return result;
 414}
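/*
 * Editorial example (hypothetical firmware) of the five-integer _PSD
 * package parsed above, placing two CPUs in SW_ALL coordination
 * domain 0:
 *
 *	Name (_PSD, Package ()
 *	{
 *		// NumEntries, Revision, Domain, CoordType, NumProcessors
 *		Package () { 5, 0, 0, 0xFC, 2 }	// 0xFC == SW_ALL
 *	})
 */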
 415
 416/**
 417 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 418 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 419 *
 420 *	Return: 0 for success or negative value for err.
 421 */
 422int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 423{
 424	int count_target;
 425	int retval = 0;
 426	unsigned int i, j;
 427	cpumask_var_t covered_cpus;
 428	struct cppc_cpudata *pr, *match_pr;
 429	struct acpi_psd_package *pdomain;
 430	struct acpi_psd_package *match_pdomain;
 431	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
 432
 433	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 434		return -ENOMEM;
 435
 436	/*
 437	 * Now that we have _PSD data from all CPUs, let's setup P-state
 438	 * domain info.
 439	 */
 440	for_each_possible_cpu(i) {
 441		pr = all_cpu_data[i];
 442		if (!pr)
 443			continue;
 444
 445		if (cpumask_test_cpu(i, covered_cpus))
 446			continue;
 447
 448		cpc_ptr = per_cpu(cpc_desc_ptr, i);
 449		if (!cpc_ptr) {
 450			retval = -EFAULT;
 451			goto err_ret;
 452		}
 453
 454		pdomain = &(cpc_ptr->domain_info);
 455		cpumask_set_cpu(i, pr->shared_cpu_map);
 456		cpumask_set_cpu(i, covered_cpus);
 457		if (pdomain->num_processors <= 1)
 458			continue;
 459
 460		/* Validate the Domain info */
 461		count_target = pdomain->num_processors;
 462		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
 463			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 464		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
 465			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
 466		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
 467			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 468
 469		for_each_possible_cpu(j) {
 470			if (i == j)
 471				continue;
 472
 473			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 474			if (!match_cpc_ptr) {
 475				retval = -EFAULT;
 476				goto err_ret;
 477			}
 478
 479			match_pdomain = &(match_cpc_ptr->domain_info);
 480			if (match_pdomain->domain != pdomain->domain)
 481				continue;
 482
 483			/* Here i and j are in the same domain */
 484			if (match_pdomain->num_processors != count_target) {
 485				retval = -EFAULT;
 486				goto err_ret;
 487			}
 488
 489			if (pdomain->coord_type != match_pdomain->coord_type) {
 490				retval = -EFAULT;
 491				goto err_ret;
 492			}
 493
 494			cpumask_set_cpu(j, covered_cpus);
 495			cpumask_set_cpu(j, pr->shared_cpu_map);
 496		}
 497
 498		for_each_possible_cpu(j) {
 499			if (i == j)
 500				continue;
 501
 502			match_pr = all_cpu_data[j];
 503			if (!match_pr)
 504				continue;
 505
 506			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 507			if (!match_cpc_ptr) {
 508				retval = -EFAULT;
 509				goto err_ret;
 510			}
 511
 512			match_pdomain = &(match_cpc_ptr->domain_info);
 513			if (match_pdomain->domain != pdomain->domain)
 514				continue;
 515
 516			match_pr->shared_type = pr->shared_type;
 517			cpumask_copy(match_pr->shared_cpu_map,
 518				     pr->shared_cpu_map);
 519		}
 520	}
 521
 522err_ret:
 523	for_each_possible_cpu(i) {
 524		pr = all_cpu_data[i];
 525		if (!pr)
 526			continue;
 527
 528		/* Assume no coordination on any error parsing domain info */
 529		if (retval) {
 530			cpumask_clear(pr->shared_cpu_map);
 531			cpumask_set_cpu(i, pr->shared_cpu_map);
 532			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 533		}
 534	}
 535
 536	free_cpumask_var(covered_cpus);
 537	return retval;
 538}
 539EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 540
 541static int register_pcc_channel(int pcc_ss_idx)
 542{
 543	struct acpi_pcct_hw_reduced *cppc_ss;
 544	u64 usecs_lat;
 545
 546	if (pcc_ss_idx >= 0) {
 547		pcc_data[pcc_ss_idx]->pcc_channel =
 548			pcc_mbox_request_channel(&cppc_mbox_cl,	pcc_ss_idx);
 549
 550		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
 551			pr_err("Failed to find PCC channel for subspace %d\n",
 552			       pcc_ss_idx);
 553			return -ENODEV;
 554		}
 555
 556		/*
 557		 * The PCC mailbox controller driver should
 558		 * have parsed the PCCT (global table of all
 559		 * PCC channels) and stored pointers to the
 560		 * subspace communication region in con_priv.
 561		 */
 562		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 563
 564		if (!cppc_ss) {
 565			pr_err("No PCC subspace found for %d CPPC\n",
 566			       pcc_ss_idx);
 567			return -ENODEV;
 568		}
 569
 570		/*
 571		 * cppc_ss->latency is just a Nominal value. In reality
 572		 * the remote processor could be much slower to reply.
 573		 * So add an arbitrary amount of wait on top of Nominal.
 574		 */
 575		usecs_lat = NUM_RETRIES * cppc_ss->latency;
 576		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
 577		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
 578		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
 579		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
 580
 581		pcc_data[pcc_ss_idx]->pcc_comm_addr =
 582			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
 583		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 584			pr_err("Failed to ioremap PCC comm region mem for %d\n",
 585			       pcc_ss_idx);
 586			return -ENOMEM;
 587		}
 588
 589		/* Set flag so that we don't come here for each CPU. */
 590		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 591	}
 592
 593	return 0;
 594}
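/*
 * Editorial example of the deadline computed above: a PCCT nominal
 * latency of 100us with NUM_RETRIES == 500 yields deadline_us == 50000,
 * i.e. check_pcc_chan() polls for up to 50ms before declaring a timeout.
 */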
 595
 596/**
 597 * cpc_ffh_supported() - check if FFH reading supported
 598 *
 599 * Check if the architecture has support for functional fixed hardware
 600 * read/write capability.
 601 *
 602 * Return: true for supported, false for not supported
 603 */
 604bool __weak cpc_ffh_supported(void)
 605{
 606	return false;
 607}
 608
 609/**
 610 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 611 *
 612 * Check and allocate the cppc_pcc_data memory.
  613 * In some processor configurations it is possible that the same subspace
 614 * is shared between multiple CPUs. This is seen especially in CPUs
 615 * with hardware multi-threading support.
 616 *
 617 * Return: 0 for success, errno for failure
 618 */
 619int pcc_data_alloc(int pcc_ss_id)
 620{
 621	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
 622		return -EINVAL;
 623
 624	if (pcc_data[pcc_ss_id]) {
 625		pcc_data[pcc_ss_id]->refcount++;
 626	} else {
 627		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
 628					      GFP_KERNEL);
 629		if (!pcc_data[pcc_ss_id])
 630			return -ENOMEM;
 631		pcc_data[pcc_ss_id]->refcount++;
 632	}
 633
 634	return 0;
 635}
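/*
 * Editorial example: two hyperthreads whose _CPC tables reference the
 * same PCC subspace both call pcc_data_alloc() with the same pcc_ss_id;
 * the first call allocates pcc_data[pcc_ss_id], the second only bumps
 * refcount to 2, and acpi_cppc_processor_exit() frees the structure
 * once the count drops back to zero.
 */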
 636
 637/* Check if CPPC revision + num_ent combination is supported */
 638static bool is_cppc_supported(int revision, int num_ent)
 639{
 640	int expected_num_ent;
 641
 642	switch (revision) {
 643	case CPPC_V2_REV:
 644		expected_num_ent = CPPC_V2_NUM_ENT;
 645		break;
 646	case CPPC_V3_REV:
 647		expected_num_ent = CPPC_V3_NUM_ENT;
 648		break;
 649	default:
 650		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
 651			revision);
 652		return false;
 653	}
 654
 655	if (expected_num_ent != num_ent) {
 656		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
 657			num_ent, expected_num_ent, revision);
 658		return false;
 659	}
 660
 661	return true;
 662}
 663
 664/*
 665 * An example CPC table looks like the following.
 666 *
 667 *	Name(_CPC, Package()
 668 *			{
 669 *			17,
 670 *			NumEntries
 671 *			1,
 672 *			// Revision
 673 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 674 *			// Highest Performance
 675 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 676 *			// Nominal Performance
 677 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 678 *			// Lowest Nonlinear Performance
 679 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 680 *			// Lowest Performance
 681 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 682 *			// Guaranteed Performance Register
 683 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 684 *			// Desired Performance Register
 685 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 686 *			..
 687 *			..
 688 *			..
 689 *
 690 *		}
 691 * Each Register() encodes how to access that specific register.
 692 * e.g. a sample PCC entry has the following encoding:
 693 *
 694 *	Register (
 695 *		PCC,
 696 *		AddressSpaceKeyword
 697 *		8,
 698 *		//RegisterBitWidth
 699 *		8,
 700 *		//RegisterBitOffset
 701 *		0x30,
 702 *		//RegisterAddress
 703 *		9
 704 *		//AccessSize (subspace ID)
 705 *		0
 706 *		)
 707 *	}
 708 */
 709
 710/**
 711 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 712 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 713 *
 714 *	Return: 0 for success or negative value for err.
 715 */
 716int acpi_cppc_processor_probe(struct acpi_processor *pr)
 717{
 718	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 719	union acpi_object *out_obj, *cpc_obj;
 720	struct cpc_desc *cpc_ptr;
 721	struct cpc_reg *gas_t;
 722	struct device *cpu_dev;
 723	acpi_handle handle = pr->handle;
 724	unsigned int num_ent, i, cpc_rev;
 725	int pcc_subspace_id = -1;
 726	acpi_status status;
 727	int ret = -EFAULT;
 728
 729	/* Parse the ACPI _CPC table for this CPU. */
 730	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 731			ACPI_TYPE_PACKAGE);
 732	if (ACPI_FAILURE(status)) {
 733		ret = -ENODEV;
 734		goto out_buf_free;
 735	}
 736
 737	out_obj = (union acpi_object *) output.pointer;
 738
 739	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
 740	if (!cpc_ptr) {
 741		ret = -ENOMEM;
 742		goto out_buf_free;
 743	}
 744
 745	/* First entry is NumEntries. */
 746	cpc_obj = &out_obj->package.elements[0];
 747	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 748		num_ent = cpc_obj->integer.value;
 749	} else {
 750		pr_debug("Unexpected entry type(%d) for NumEntries\n",
 751				cpc_obj->type);
 752		goto out_free;
 753	}
 754	cpc_ptr->num_entries = num_ent;
 755
 756	/* Second entry should be revision. */
 757	cpc_obj = &out_obj->package.elements[1];
 758	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 759		cpc_rev = cpc_obj->integer.value;
 760	} else {
 761		pr_debug("Unexpected entry type(%d) for Revision\n",
 762				cpc_obj->type);
 763		goto out_free;
 764	}
 765	cpc_ptr->version = cpc_rev;
 766
 767	if (!is_cppc_supported(cpc_rev, num_ent))
 768		goto out_free;
 769
 770	/* Iterate through remaining entries in _CPC */
 771	for (i = 2; i < num_ent; i++) {
 772		cpc_obj = &out_obj->package.elements[i];
 773
 774		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 775			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
 776			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
 777		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
 778			gas_t = (struct cpc_reg *)
 779				cpc_obj->buffer.pointer;
 780
 781			/*
 782			 * The PCC Subspace index is encoded inside
 783			 * the CPC table entries. The same PCC index
 784			 * will be used for all the PCC entries,
 785			 * so extract it only once.
 786			 */
 787			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 788				if (pcc_subspace_id < 0) {
 789					pcc_subspace_id = gas_t->access_width;
 790					if (pcc_data_alloc(pcc_subspace_id))
 791						goto out_free;
 792				} else if (pcc_subspace_id != gas_t->access_width) {
 793					pr_debug("Mismatched PCC ids.\n");
 794					goto out_free;
 795				}
 796			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 797				if (gas_t->address) {
 798					void __iomem *addr;
 799
 800					addr = ioremap(gas_t->address, gas_t->bit_width/8);
 801					if (!addr)
 802						goto out_free;
 803					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
 804				}
 805			} else {
 806				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  807					/* Support only PCC, SYS MEM and FFH type regs */
 808					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
 809					goto out_free;
 810				}
 811			}
 812
 813			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
 814			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
 815		} else {
 816			pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
 817			goto out_free;
 818		}
 819	}
 820	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 821
 822	/*
 823	 * Initialize the remaining cpc_regs as unsupported.
 824	 * Example: In case FW exposes CPPC v2, the below loop will initialize
 825	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
 826	 */
 827	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
 828		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
 829		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
 830	}
 831
 832
 833	/* Store CPU Logical ID */
 834	cpc_ptr->cpu_id = pr->id;
 835
 836	/* Parse PSD data for this CPU */
 837	ret = acpi_get_psd(cpc_ptr, handle);
 838	if (ret)
 839		goto out_free;
 840
 841	/* Register PCC channel once for all PCC subspace ID. */
 842	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 843		ret = register_pcc_channel(pcc_subspace_id);
 844		if (ret)
 845			goto out_free;
 846
 847		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
 848		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 849	}
 850
 851	/* Everything looks okay */
 852	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 853
 854	/* Add per logical CPU nodes for reading its feedback counters. */
 855	cpu_dev = get_cpu_device(pr->id);
 856	if (!cpu_dev) {
 857		ret = -EINVAL;
 858		goto out_free;
 859	}
 860
 861	/* Plug PSD data into this CPU's CPC descriptor. */
 862	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 863
 864	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 865			"acpi_cppc");
 866	if (ret) {
 867		per_cpu(cpc_desc_ptr, pr->id) = NULL;
 868		goto out_free;
 869	}
 870
 871	kfree(output.pointer);
 872	return 0;
 873
 874out_free:
 875	/* Free all the mapped sys mem areas for this CPU */
 876	for (i = 2; i < cpc_ptr->num_entries; i++) {
 877		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 878
 879		if (addr)
 880			iounmap(addr);
 881	}
 882	kfree(cpc_ptr);
 883
 884out_buf_free:
 885	kfree(output.pointer);
 886	return ret;
 887}
 888EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 889
 890/**
 891 * acpi_cppc_processor_exit - Cleanup CPC structs.
 892 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 893 *
 894 * Return: Void
 895 */
 896void acpi_cppc_processor_exit(struct acpi_processor *pr)
 897{
 898	struct cpc_desc *cpc_ptr;
 899	unsigned int i;
 900	void __iomem *addr;
 901	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
 902
 903	if (pcc_ss_id >=0 && pcc_data[pcc_ss_id]) {
 904		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
 905			pcc_data[pcc_ss_id]->refcount--;
 906			if (!pcc_data[pcc_ss_id]->refcount) {
 907				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
 908				kfree(pcc_data[pcc_ss_id]);
 909				pcc_data[pcc_ss_id] = NULL;
 910			}
 911		}
 912	}
 913
 914	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 915	if (!cpc_ptr)
 916		return;
 917
 918	/* Free all the mapped sys mem areas for this CPU */
 919	for (i = 2; i < cpc_ptr->num_entries; i++) {
 920		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 921		if (addr)
 922			iounmap(addr);
 923	}
 924
 925	kobject_put(&cpc_ptr->kobj);
 926	kfree(cpc_ptr);
 927}
 928EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 929
 930/**
 931 * cpc_read_ffh() - Read FFH register
 932 * @cpunum:	CPU number to read
 933 * @reg:	cppc register information
 934 * @val:	place holder for return value
 935 *
 936 * Read bit_width bits from a specified address and bit_offset
 937 *
  938 * Return: 0 on success, or an error code otherwise
 939 */
 940int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 941{
 942	return -ENOTSUPP;
 943}
 944
 945/**
 946 * cpc_write_ffh() - Write FFH register
 947 * @cpunum:	CPU number to write
 948 * @reg:	cppc register information
 949 * @val:	value to write
 950 *
 951 * Write value of bit_width bits to a specified address and bit_offset
 952 *
  953 * Return: 0 on success, or an error code otherwise
 954 */
 955int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 956{
 957	return -ENOTSUPP;
 958}
 959
 960/*
  961 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
  962 * be as fast as possible. We have already mapped the PCC subspace during init, so
 963 * we can directly write to it.
 964 */
 965
 966static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 967{
 968	int ret_val = 0;
 969	void __iomem *vaddr = 0;
 970	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 971	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 972
 973	if (reg_res->type == ACPI_TYPE_INTEGER) {
 974		*val = reg_res->cpc_entry.int_value;
 975		return ret_val;
 976	}
 977
 978	*val = 0;
 979	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
 980		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 981	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 982		vaddr = reg_res->sys_mem_vaddr;
 983	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
 984		return cpc_read_ffh(cpu, reg, val);
 985	else
 986		return acpi_os_read_memory((acpi_physical_address)reg->address,
 987				val, reg->bit_width);
 988
 989	switch (reg->bit_width) {
 990		case 8:
 991			*val = readb_relaxed(vaddr);
 992			break;
 993		case 16:
 994			*val = readw_relaxed(vaddr);
 995			break;
 996		case 32:
 997			*val = readl_relaxed(vaddr);
 998			break;
 999		case 64:
1000			*val = readq_relaxed(vaddr);
1001			break;
1002		default:
1003			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1004				 reg->bit_width, pcc_ss_id);
1005			ret_val = -EFAULT;
1006	}
1007
1008	return ret_val;
1009}
1010
1011static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1012{
1013	int ret_val = 0;
1014	void __iomem *vaddr = 0;
1015	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1016	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1017
1018	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1019		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1020	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1021		vaddr = reg_res->sys_mem_vaddr;
1022	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1023		return cpc_write_ffh(cpu, reg, val);
1024	else
1025		return acpi_os_write_memory((acpi_physical_address)reg->address,
1026				val, reg->bit_width);
1027
1028	switch (reg->bit_width) {
1029		case 8:
1030			writeb_relaxed(val, vaddr);
1031			break;
1032		case 16:
1033			writew_relaxed(val, vaddr);
1034			break;
1035		case 32:
1036			writel_relaxed(val, vaddr);
1037			break;
1038		case 64:
1039			writeq_relaxed(val, vaddr);
1040			break;
1041		default:
1042			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1043				 reg->bit_width, pcc_ss_id);
1044			ret_val = -EFAULT;
1045			break;
1046	}
1047
1048	return ret_val;
1049}
1050
1051/**
1052 * cppc_get_desired_perf - Get the value of desired performance register.
1053 * @cpunum: CPU from which to get desired performance.
1054 * @desired_perf: address of a variable to store the returned desired performance
1055 *
1056 * Return: 0 for success, -EIO otherwise.
1057 */
1058int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1059{
1060	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1061	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1062	struct cpc_register_resource *desired_reg;
1063	struct cppc_pcc_data *pcc_ss_data = NULL;
1064
1065	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1066
1067	if (CPC_IN_PCC(desired_reg)) {
1068		int ret = 0;
1069
1070		if (pcc_ss_id < 0)
1071			return -EIO;
1072
1073		pcc_ss_data = pcc_data[pcc_ss_id];
1074
1075		down_write(&pcc_ss_data->pcc_lock);
1076
1077		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1078			cpc_read(cpunum, desired_reg, desired_perf);
1079		else
1080			ret = -EIO;
1081
1082		up_write(&pcc_ss_data->pcc_lock);
1083
1084		return ret;
1085	}
1086
1087	cpc_read(cpunum, desired_reg, desired_perf);
1088
1089	return 0;
1090}
1091EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1092
1093/**
1094 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1095 * @cpunum: CPU from which to get capabilities info.
1096 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1097 *
1098 * Return: 0 for success with perf_caps populated else -ERRNO.
1099 */
1100int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1101{
1102	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1103	struct cpc_register_resource *highest_reg, *lowest_reg,
1104		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1105		*low_freq_reg = NULL, *nom_freq_reg = NULL;
1106	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1107	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1108	struct cppc_pcc_data *pcc_ss_data = NULL;
1109	int ret = 0, regs_in_pcc = 0;
1110
1111	if (!cpc_desc) {
1112		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1113		return -ENODEV;
1114	}
1115
1116	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1117	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1118	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1119	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1120	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1121	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1122	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1123
1124	/* Are any of the regs PCC ?*/
1125	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1126		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1127		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1128		if (pcc_ss_id < 0) {
1129			pr_debug("Invalid pcc_ss_id\n");
1130			return -ENODEV;
1131		}
1132		pcc_ss_data = pcc_data[pcc_ss_id];
1133		regs_in_pcc = 1;
1134		down_write(&pcc_ss_data->pcc_lock);
1135		/* Ring doorbell once to update PCC subspace */
1136		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1137			ret = -EIO;
1138			goto out_err;
1139		}
1140	}
1141
1142	cpc_read(cpunum, highest_reg, &high);
1143	perf_caps->highest_perf = high;
1144
1145	cpc_read(cpunum, lowest_reg, &low);
1146	perf_caps->lowest_perf = low;
1147
1148	cpc_read(cpunum, nominal_reg, &nom);
1149	perf_caps->nominal_perf = nom;
1150
1151	if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1152	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1153		perf_caps->guaranteed_perf = 0;
1154	} else {
1155		cpc_read(cpunum, guaranteed_reg, &guaranteed);
1156		perf_caps->guaranteed_perf = guaranteed;
1157	}
1158
1159	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1160	perf_caps->lowest_nonlinear_perf = min_nonlinear;
1161
1162	if (!high || !low || !nom || !min_nonlinear)
1163		ret = -EFAULT;
1164
1165	/* Read optional lowest and nominal frequencies if present */
1166	if (CPC_SUPPORTED(low_freq_reg))
1167		cpc_read(cpunum, low_freq_reg, &low_f);
1168
1169	if (CPC_SUPPORTED(nom_freq_reg))
1170		cpc_read(cpunum, nom_freq_reg, &nom_f);
1171
1172	perf_caps->lowest_freq = low_f;
1173	perf_caps->nominal_freq = nom_f;
1174
1175
1176out_err:
1177	if (regs_in_pcc)
1178		up_write(&pcc_ss_data->pcc_lock);
1179	return ret;
1180}
1181EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1182
1183/**
1184 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1185 * @cpunum: CPU from which to read counters.
1186 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1187 *
1188 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1189 */
1190int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1191{
1192	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1193	struct cpc_register_resource *delivered_reg, *reference_reg,
1194		*ref_perf_reg, *ctr_wrap_reg;
1195	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1196	struct cppc_pcc_data *pcc_ss_data = NULL;
1197	u64 delivered, reference, ref_perf, ctr_wrap_time;
1198	int ret = 0, regs_in_pcc = 0;
1199
1200	if (!cpc_desc) {
1201		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1202		return -ENODEV;
1203	}
1204
1205	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1206	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1207	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1208	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1209
1210	/*
1211	 * If reference perf register is not supported then we should
1212	 * use the nominal perf value
1213	 */
1214	if (!CPC_SUPPORTED(ref_perf_reg))
1215		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1216
1217	/* Are any of the regs PCC ?*/
1218	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1219		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1220		if (pcc_ss_id < 0) {
1221			pr_debug("Invalid pcc_ss_id\n");
1222			return -ENODEV;
1223		}
1224		pcc_ss_data = pcc_data[pcc_ss_id];
1225		down_write(&pcc_ss_data->pcc_lock);
1226		regs_in_pcc = 1;
1227		/* Ring doorbell once to update PCC subspace */
1228		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1229			ret = -EIO;
1230			goto out_err;
1231		}
1232	}
1233
1234	cpc_read(cpunum, delivered_reg, &delivered);
1235	cpc_read(cpunum, reference_reg, &reference);
1236	cpc_read(cpunum, ref_perf_reg, &ref_perf);
1237
1238	/*
1239	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
1240	 * performance counters are assumed to never wrap during the lifetime of
 1241	 * the platform.
1242	 */
1243	ctr_wrap_time = (u64)(~((u64)0));
1244	if (CPC_SUPPORTED(ctr_wrap_reg))
1245		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1246
1247	if (!delivered || !reference ||	!ref_perf) {
1248		ret = -EFAULT;
1249		goto out_err;
1250	}
1251
1252	perf_fb_ctrs->delivered = delivered;
1253	perf_fb_ctrs->reference = reference;
1254	perf_fb_ctrs->reference_perf = ref_perf;
1255	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1256out_err:
1257	if (regs_in_pcc)
1258		up_write(&pcc_ss_data->pcc_lock);
1259	return ret;
1260}
1261EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
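/*
 * Editorial sketch (not part of the original file): consumers such as
 * the cppc_cpufreq driver typically turn two snapshots of these
 * counters into an average delivered performance level over the sample
 * window, roughly:
 *
 *	static u64 example_delivered_perf(struct cppc_perf_fb_ctrs *t0,
 *					  struct cppc_perf_fb_ctrs *t1)
 *	{
 *		u64 delta_ref = t1->reference - t0->reference;
 *		u64 delta_del = t1->delivered - t0->delivered;
 *
 *		if (!delta_ref)		// no cycles elapsed in the window
 *			return t0->reference_perf;
 *
 *		// Scale reference performance by the ratio of delivered
 *		// to reference cycles over the window.
 *		return div64_u64(t0->reference_perf * delta_del,
 *				 delta_ref);
 *	}
 */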
1262
1263/**
1264 * cppc_set_perf - Set a CPU's performance controls.
1265 * @cpu: CPU for which to set performance controls.
1266 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1267 *
1268 * Return: 0 for success, -ERRNO otherwise.
1269 */
1270int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1271{
1272	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1273	struct cpc_register_resource *desired_reg;
1274	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1275	struct cppc_pcc_data *pcc_ss_data = NULL;
1276	int ret = 0;
1277
1278	if (!cpc_desc) {
1279		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1280		return -ENODEV;
1281	}
1282
1283	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1284
1285	/*
1286	 * This is Phase-I where we want to write to CPC registers
1287	 * -> We want all CPUs to be able to execute this phase in parallel
1288	 *
1289	 * Since read_lock can be acquired by multiple CPUs simultaneously we
1290	 * achieve that goal here
1291	 */
1292	if (CPC_IN_PCC(desired_reg)) {
1293		if (pcc_ss_id < 0) {
1294			pr_debug("Invalid pcc_ss_id\n");
1295			return -ENODEV;
1296		}
1297		pcc_ss_data = pcc_data[pcc_ss_id];
1298		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1299		if (pcc_ss_data->platform_owns_pcc) {
1300			ret = check_pcc_chan(pcc_ss_id, false);
1301			if (ret) {
1302				up_read(&pcc_ss_data->pcc_lock);
1303				return ret;
1304			}
1305		}
1306		/*
1307		 * Update the pending_write to make sure a PCC CMD_READ will not
1308		 * arrive and steal the channel during the switch to write lock
1309		 */
1310		pcc_ss_data->pending_pcc_write_cmd = true;
1311		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1312		cpc_desc->write_cmd_status = 0;
1313	}
1314
1315	/*
1316	 * Skip writing MIN/MAX until Linux knows how to come up with
1317	 * useful values.
1318	 */
1319	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1320
1321	if (CPC_IN_PCC(desired_reg))
1322		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
1323	/*
1324	 * This is Phase-II where we transfer the ownership of PCC to Platform
1325	 *
 1326	 * Short Summary: Consider a group of cppc_set_perf requests that
 1327	 * happen in a short, overlapping interval. The last CPU to
1328	 * come out of Phase-I will enter Phase-II and ring the doorbell.
1329	 *
1330	 * We have the following requirements for Phase-II:
1331	 *     1. We want to execute Phase-II only when there are no CPUs
1332	 * currently executing in Phase-I
1333	 *     2. Once we start Phase-II we want to avoid all other CPUs from
1334	 * entering Phase-I.
1335	 *     3. We want only one CPU among all those who went through Phase-I
1336	 * to run phase-II
1337	 *
1338	 * If write_trylock fails to get the lock and doesn't transfer the
1339	 * PCC ownership to the platform, then one of the following will be TRUE
1340	 *     1. There is at-least one CPU in Phase-I which will later execute
1341	 * write_trylock, so the CPUs in Phase-I will be responsible for
1342	 * executing the Phase-II.
1343	 *     2. Some other CPU has beaten this CPU to successfully execute the
1344	 * write_trylock and has already acquired the write_lock. We know for a
1345	 * fact it (other CPU acquiring the write_lock) couldn't have happened
1346	 * before this CPU's Phase-I as we held the read_lock.
1347	 *     3. Some other CPU executing pcc CMD_READ has stolen the
1348	 * down_write, in which case, send_pcc_cmd will check for pending
1349	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
 1350	 * So this CPU can be certain that its request will be delivered.
 1351	 *    In all cases, then, this CPU knows that its request will be
 1352	 * delivered by another CPU and can return
1353	 *
1354	 * After getting the down_write we still need to check for
1355	 * pending_pcc_write_cmd to take care of the following scenario
1356	 *    The thread running this code could be scheduled out between
1357	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1358	 * could have delivered the request to Platform by triggering the
1359	 * doorbell and transferred the ownership of PCC to platform. So this
1360	 * avoids triggering an unnecessary doorbell and more importantly before
1361	 * triggering the doorbell it makes sure that the PCC channel ownership
1362	 * is still with OSPM.
1363	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1364	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
1365	 * before the pcc CMD_WRITE is completed. pcc_send_cmd checks for this
1366	 * case during a CMD_READ and if there are pending writes it delivers
1367	 * the write command before servicing the read command
1368	 */
1369	if (CPC_IN_PCC(desired_reg)) {
1370		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1371			/* Update only if there are pending write commands */
1372			if (pcc_ss_data->pending_pcc_write_cmd)
1373				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1374			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
1375		} else
1376			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1377			wait_event(pcc_ss_data->pcc_write_wait_q,
1378				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1379
1380		/* send_pcc_cmd updates the status in case of failure */
1381		ret = cpc_desc->write_cmd_status;
1382	}
1383	return ret;
1384}
1385EXPORT_SYMBOL_GPL(cppc_set_perf);
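/*
 * Editorial timeline of the two-phase batching described above, for
 * two CPUs updating desired performance at nearly the same time:
 *
 *	CPU0: down_read -> cpc_write(desired) -> up_read
 *	CPU1: down_read -> cpc_write(desired) -> up_read
 *	CPU1: down_write_trylock() succeeds -> send_pcc_cmd(CMD_WRITE)
 *	      rings the doorbell once for both requests -> up_write
 *	CPU0: down_write_trylock() fails -> sleeps on pcc_write_wait_q
 *	      until pcc_write_cnt advances, then returns write_cmd_status
 */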
1386
1387/**
1388 * cppc_get_transition_latency - returns frequency transition latency in ns
1389 *
 1390 * ACPI CPPC does not explicitly specify how a platform can specify the
 1391 * transition latency for performance change requests. The closest we have
1392 * is the timing information from the PCCT tables which provides the info
1393 * on the number and frequency of PCC commands the platform can handle.
1394 */
1395unsigned int cppc_get_transition_latency(int cpu_num)
1396{
1397	/*
1398	 * Expected transition latency is based on the PCCT timing values
 1399	 * Below are definitions from the ACPI spec:
1400	 * pcc_nominal- Expected latency to process a command, in microseconds
1401	 * pcc_mpar   - The maximum number of periodic requests that the subspace
1402	 *              channel can support, reported in commands per minute. 0
1403	 *              indicates no limitation.
1404	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1405	 *              completion of a command before issuing the next command,
1406	 *              in microseconds.
1407	 */
1408	unsigned int latency_ns = 0;
1409	struct cpc_desc *cpc_desc;
1410	struct cpc_register_resource *desired_reg;
1411	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1412	struct cppc_pcc_data *pcc_ss_data;
1413
1414	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1415	if (!cpc_desc)
1416		return CPUFREQ_ETERNAL;
1417
1418	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1419	if (!CPC_IN_PCC(desired_reg))
1420		return CPUFREQ_ETERNAL;
1421
1422	if (pcc_ss_id < 0)
1423		return CPUFREQ_ETERNAL;
1424
1425	pcc_ss_data = pcc_data[pcc_ss_id];
1426	if (pcc_ss_data->pcc_mpar)
1427		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1428
1429	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1430	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1431
1432	return latency_ns;
1433}
1434EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
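/*
 * Editorial example of the computation above: with pcc_mpar == 6000
 * commands/minute the MPAR term is 60 * (10^9 / 6000) ns, i.e. about
 * 10ms (one command per 10ms); a pcc_nominal of 100us contributes
 * 100,000ns and a pcc_mrtt of 60us contributes 60,000ns, so the
 * reported transition latency would be ~10ms.
 */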
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
   4 *
   5 * (C) Copyright 2014, 2015 Linaro Ltd.
   6 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
   7 *
   8 * CPPC describes a few methods for controlling CPU performance using
   9 * information from a per CPU table called CPC. This table is described in
  10 * the ACPI v5.0+ specification. The table consists of a list of
  11 * registers which may be memory mapped or hardware registers and also may
  12 * include some static integer values.
  13 *
  14 * CPU performance is on an abstract continuous scale as against a discretized
  15 * P-state scale which is tied to CPU frequency only. In brief, the basic
  16 * operation involves:
  17 *
  18 * - OS makes a CPU performance request. (Can provide min and max bounds)
  19 *
  20 * - Platform (such as BMC) is free to optimize request within requested bounds
  21 *   depending on power/thermal budgets etc.
  22 *
  23 * - Platform conveys its decision back to OS
  24 *
  25 * The communication between OS and platform occurs through another medium
  26 * called (PCC) Platform Communication Channel. This is a generic mailbox like
  27 * mechanism which includes doorbell semantics to indicate register updates.
  28 * See drivers/mailbox/pcc.c for details on PCC.
  29 *
  30 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  31 * above specifications.
  32 */
  33
  34#define pr_fmt(fmt)	"ACPI CPPC: " fmt
  35
  36#include <linux/delay.h>
  37#include <linux/iopoll.h>
  38#include <linux/ktime.h>
  39#include <linux/rwsem.h>
  40#include <linux/wait.h>
  41#include <linux/topology.h>
  42
  43#include <acpi/cppc_acpi.h>
  44
  45struct cppc_pcc_data {
  46	struct pcc_mbox_chan *pcc_channel;
  47	void __iomem *pcc_comm_addr;
  48	bool pcc_channel_acquired;
  49	unsigned int deadline_us;
  50	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  51
  52	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
  53	bool platform_owns_pcc;		/* Ownership of PCC subspace */
  54	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */
  55
  56	/*
  57	 * Lock to provide controlled access to the PCC channel.
  58	 *
  59	 * For performance critical usecases(currently cppc_set_perf)
  60	 *	We need to take read_lock and check if channel belongs to OSPM
  61	 * before reading or writing to PCC subspace
  62	 *	We need to take write_lock before transferring the channel
  63	 * ownership to the platform via a Doorbell
  64	 *	This allows us to batch a number of CPPC requests if they happen
  65	 * to originate in about the same time
  66	 *
  67	 * For non-performance critical usecases(init)
  68	 *	Take write_lock for all purposes which gives exclusive access
  69	 */
  70	struct rw_semaphore pcc_lock;
  71
  72	/* Wait queue for CPUs whose requests were batched */
  73	wait_queue_head_t pcc_write_wait_q;
  74	ktime_t last_cmd_cmpl_time;
  75	ktime_t last_mpar_reset;
  76	int mpar_count;
  77	int refcount;
  78};
  79
  80/* Array to represent the PCC channel per subspace ID */
  81static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  82/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
  83static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  84
  85/*
  86 * The cpc_desc structure contains the ACPI register details
  87 * as described in the per CPU _CPC tables. The details
  88 * include the type of register (e.g. PCC, System IO, FFH etc.)
  89 * and destination addresses which lets us READ/WRITE CPU performance
  90 * information using the appropriate I/O methods.
  91 */
  92static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  93
  94/* pcc mapped address + header size + offset within PCC subspace */
  95#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  96						0x8 + (offs))
  97
  98/* Check if a CPC register is in PCC */
  99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
 100				(cpc)->cpc_entry.reg.space_id ==	\
 101				ACPI_ADR_SPACE_PLATFORM_COMM)
 102
 103/* Check if a CPC register is in SystemMemory */
 104#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
 105				(cpc)->cpc_entry.reg.space_id ==	\
 106				ACPI_ADR_SPACE_SYSTEM_MEMORY)
 107
 108/* Check if a CPC register is in SystemIo */
 109#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
 110				(cpc)->cpc_entry.reg.space_id ==	\
 111				ACPI_ADR_SPACE_SYSTEM_IO)
 112
 113/* Evaluates to True if reg is a NULL register descriptor */
 114#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
 115				(reg)->address == 0 &&			\
 116				(reg)->bit_width == 0 &&		\
 117				(reg)->bit_offset == 0 &&		\
 118				(reg)->access_width == 0)
 119
 120/* Evaluates to True if an optional cpc field is supported */
 121#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
 122				!!(cpc)->cpc_entry.int_value :		\
 123				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
 124/*
 125 * Arbitrary Retries in case the remote processor is slow to respond
 126 * to PCC commands. Keeping it high enough to cover emulators where
 127 * the processors run painfully slow.
 128 */
 129#define NUM_RETRIES 500ULL
 130
 131#define OVER_16BTS_MASK ~0xFFFFULL
 132
 133#define define_one_cppc_ro(_name)		\
 134static struct kobj_attribute _name =		\
 135__ATTR(_name, 0444, show_##_name, NULL)
 136
 137#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
 138
 139#define show_cppc_data(access_fn, struct_name, member_name)		\
 140	static ssize_t show_##member_name(struct kobject *kobj,		\
 141				struct kobj_attribute *attr, char *buf)	\
 142	{								\
 143		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
 144		struct struct_name st_name = {0};			\
 145		int ret;						\
 146									\
 147		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
 148		if (ret)						\
 149			return ret;					\
 150									\
 151		return sysfs_emit(buf, "%llu\n",		\
 152				(u64)st_name.member_name);		\
 153	}								\
 154	define_one_cppc_ro(member_name)
 155
 156show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 158show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 159show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
 160show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
 161show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
 162
 163show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 164show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 165
 166static ssize_t show_feedback_ctrs(struct kobject *kobj,
 167		struct kobj_attribute *attr, char *buf)
 168{
 169	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 170	struct cppc_perf_fb_ctrs fb_ctrs = {0};
 171	int ret;
 172
 173	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
 174	if (ret)
 175		return ret;
 176
 177	return sysfs_emit(buf, "ref:%llu del:%llu\n",
 178			fb_ctrs.reference, fb_ctrs.delivered);
 179}
 180define_one_cppc_ro(feedback_ctrs);
 181
 182static struct attribute *cppc_attrs[] = {
 183	&feedback_ctrs.attr,
 184	&reference_perf.attr,
 185	&wraparound_time.attr,
 186	&highest_perf.attr,
 187	&lowest_perf.attr,
 188	&lowest_nonlinear_perf.attr,
 189	&nominal_perf.attr,
 190	&nominal_freq.attr,
 191	&lowest_freq.attr,
 192	NULL
 193};
 194ATTRIBUTE_GROUPS(cppc);
 195
 196static struct kobj_type cppc_ktype = {
 197	.sysfs_ops = &kobj_sysfs_ops,
 198	.default_groups = cppc_groups,
 199};
 200
 201static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 202{
 203	int ret, status;
 204	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 205	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 206		pcc_ss_data->pcc_comm_addr;
 207
 208	if (!pcc_ss_data->platform_owns_pcc)
 209		return 0;
 210
 211	/*
 212	 * Poll PCC status register every 3us(delay_us) for maximum of
 213	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
 214	 */
 215	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
 216					status & PCC_CMD_COMPLETE_MASK, 3,
 217					pcc_ss_data->deadline_us);
 218
 219	if (likely(!ret)) {
 220		pcc_ss_data->platform_owns_pcc = false;
 221		if (chk_err_bit && (status & PCC_ERROR_MASK))
 222			ret = -EIO;
 223	}
 224
 225	if (unlikely(ret))
 226		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
 227		       pcc_ss_id, ret);
 228
 229	return ret;
 230}
 231
 232/*
  233 * This function transfers the ownership of the PCC to the platform,
  234 * so it must be called while holding write_lock(pcc_lock).
 235 */
 236static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 237{
 238	int ret = -EIO, i;
 239	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 240	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 241		pcc_ss_data->pcc_comm_addr;
 242	unsigned int time_delta;
 243
 244	/*
  245	 * For CMD_WRITE we know for a fact that the caller has already
  246	 * checked the channel before writing to the PCC space.
 247	 */
 248	if (cmd == CMD_READ) {
 249		/*
 250		 * If there are pending cpc_writes, then we stole the channel
 251		 * before write completion, so first send a WRITE command to
  252		 * the platform.
 253		 */
 254		if (pcc_ss_data->pending_pcc_write_cmd)
 255			send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 256
 257		ret = check_pcc_chan(pcc_ss_id, false);
 258		if (ret)
 259			goto end;
 260	} else /* CMD_WRITE */
 261		pcc_ss_data->pending_pcc_write_cmd = FALSE;
 262
 263	/*
 264	 * Handle the Minimum Request Turnaround Time(MRTT)
 265	 * "The minimum amount of time that OSPM must wait after the completion
 266	 * of a command before issuing the next command, in microseconds"
 267	 */
 268	if (pcc_ss_data->pcc_mrtt) {
 269		time_delta = ktime_us_delta(ktime_get(),
 270					    pcc_ss_data->last_cmd_cmpl_time);
 271		if (pcc_ss_data->pcc_mrtt > time_delta)
 272			udelay(pcc_ss_data->pcc_mrtt - time_delta);
 273	}
 274
 275	/*
 276	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
 277	 * "The maximum number of periodic requests that the subspace channel can
 278	 * support, reported in commands per minute. 0 indicates no limitation."
 279	 *
  280	 * Ideally this parameter should be zero or large enough to handle the
  281	 * maximum number of requests that all the cores in the system can
  282	 * collectively generate. If it is not, we will follow the spec and
  283	 * simply not send the request to the platform after hitting the MPAR
  284	 * limit in any 60 s window (see the worked example after this block).
 285	 */
 286	if (pcc_ss_data->pcc_mpar) {
 287		if (pcc_ss_data->mpar_count == 0) {
 288			time_delta = ktime_ms_delta(ktime_get(),
 289						    pcc_ss_data->last_mpar_reset);
 290			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 291				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
 292					 pcc_ss_id);
 293				ret = -EIO;
 294				goto end;
 295			}
 296			pcc_ss_data->last_mpar_reset = ktime_get();
 297			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 298		}
 299		pcc_ss_data->mpar_count--;
 300	}
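
	/*
	 * Worked example (illustrative numbers): with pcc_mpar == 600, the
	 * mpar_count budget is refilled to 600 at most once per 60 s window,
	 * so on average at most 10 commands per second reach the platform;
	 * further requests inside that window fail with -EIO as described
	 * above.
	 */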
 301
 302	/* Write to the shared comm region. */
 303	writew_relaxed(cmd, &generic_comm_base->command);
 304
  305	/* Clear the CMD COMPLETE bit */
 306	writew_relaxed(0, &generic_comm_base->status);
 307
 308	pcc_ss_data->platform_owns_pcc = true;
 309
 310	/* Ring doorbell */
 311	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
 312	if (ret < 0) {
 313		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
 314		       pcc_ss_id, cmd, ret);
 315		goto end;
 316	}
 317
 318	/* wait for completion and check for PCC error bit */
 319	ret = check_pcc_chan(pcc_ss_id, true);
 320
 321	if (pcc_ss_data->pcc_mrtt)
 322		pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 323
 324	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
 325		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
 326	else
 327		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
 328
 329end:
 330	if (cmd == CMD_WRITE) {
 331		if (unlikely(ret)) {
 332			for_each_possible_cpu(i) {
 333				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
 334
 335				if (!desc)
 336					continue;
 337
 338				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 339					desc->write_cmd_status = ret;
 340			}
 341		}
 342		pcc_ss_data->pcc_write_cnt++;
 343		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 344	}
 345
 346	return ret;
 347}
 348
 349static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 350{
 351	if (ret < 0)
 352		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 353				*(u16 *)msg, ret);
 354	else
 355		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
 356				*(u16 *)msg, ret);
 357}
 358
 359static struct mbox_client cppc_mbox_cl = {
 360	.tx_done = cppc_chan_tx_done,
 361	.knows_txdone = true,
 362};
 363
 364static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 365{
 366	int result = -EFAULT;
 367	acpi_status status = AE_OK;
 368	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 369	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
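	/*
	 * "NNNNN" asks acpi_extract_package() for five integers: NumEntries,
	 * Revision, Domain, CoordType and NumProcessors, matching the layout
	 * of struct acpi_psd_package.
	 */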
 370	struct acpi_buffer state = {0, NULL};
 371	union acpi_object  *psd = NULL;
 372	struct acpi_psd_package *pdomain;
 373
 374	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
 375					    &buffer, ACPI_TYPE_PACKAGE);
 376	if (status == AE_NOT_FOUND)	/* _PSD is optional */
 377		return 0;
 378	if (ACPI_FAILURE(status))
 379		return -ENODEV;
 380
 381	psd = buffer.pointer;
 382	if (!psd || psd->package.count != 1) {
 383		pr_debug("Invalid _PSD data\n");
 384		goto end;
 385	}
 386
 387	pdomain = &(cpc_ptr->domain_info);
 388
 389	state.length = sizeof(struct acpi_psd_package);
 390	state.pointer = pdomain;
 391
 392	status = acpi_extract_package(&(psd->package.elements[0]),
 393		&format, &state);
 394	if (ACPI_FAILURE(status)) {
 395		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
 396		goto end;
 397	}
 398
 399	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
 400		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
 401		goto end;
 402	}
 403
 404	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
 405		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
 406		goto end;
 407	}
 408
 409	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 410	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 411	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 412		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
 413		goto end;
 414	}
 415
 416	result = 0;
 417end:
 418	kfree(buffer.pointer);
 419	return result;
 420}
 421
 422bool acpi_cpc_valid(void)
 423{
 424	struct cpc_desc *cpc_ptr;
 425	int cpu;
 426
 427	if (acpi_disabled)
 428		return false;
 429
 430	for_each_present_cpu(cpu) {
 431		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 432		if (!cpc_ptr)
 433			return false;
 434	}
 435
 436	return true;
 437}
 438EXPORT_SYMBOL_GPL(acpi_cpc_valid);
 439
 440bool cppc_allow_fast_switch(void)
 441{
 442	struct cpc_register_resource *desired_reg;
 443	struct cpc_desc *cpc_ptr;
 444	int cpu;
 445
 446	for_each_possible_cpu(cpu) {
 447		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 448		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
 449		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
 450				!CPC_IN_SYSTEM_IO(desired_reg))
 451			return false;
 452	}
 453
 454	return true;
 455}
 456EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
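
/*
 * Example (illustrative sketch): a cpufreq driver would typically use this
 * helper when initializing a policy to decide whether fast switching can
 * be enabled, e.g.:
 *
 *	policy->fast_switch_possible = cppc_allow_fast_switch();
 */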
 457
 458/**
 459 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 460 * @cpu: Find all CPUs that share a domain with cpu.
 461 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 462 *
 463 *	Return: 0 for success or negative value for err.
 464 */
 465int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
 466{
 467	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
 468	struct acpi_psd_package *match_pdomain;
 469	struct acpi_psd_package *pdomain;
 470	int count_target, i;
 471
 472	/*
  473	 * Now that we have _PSD data from all CPUs, let's set up P-state
 474	 * domain info.
 475	 */
 476	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 477	if (!cpc_ptr)
 478		return -EFAULT;
 479
 480	pdomain = &(cpc_ptr->domain_info);
 481	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
 482	if (pdomain->num_processors <= 1)
 483		return 0;
 484
 485	/* Validate the Domain info */
 486	count_target = pdomain->num_processors;
 487	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
 488		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 489	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
 490		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
 491	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
 492		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 493
 494	for_each_possible_cpu(i) {
 495		if (i == cpu)
 496			continue;
 497
 498		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
 499		if (!match_cpc_ptr)
 500			goto err_fault;
 501
 502		match_pdomain = &(match_cpc_ptr->domain_info);
 503		if (match_pdomain->domain != pdomain->domain)
 504			continue;
 505
 506		/* Here i and cpu are in the same domain */
 507		if (match_pdomain->num_processors != count_target)
 508			goto err_fault;
 509
 510		if (pdomain->coord_type != match_pdomain->coord_type)
 511			goto err_fault;
 512
 513		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
 514	}
 515
 516	return 0;
 517
 518err_fault:
 519	/* Assume no coordination on any error parsing domain info */
 520	cpumask_clear(cpu_data->shared_cpu_map);
 521	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
 522	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
 523
 524	return -EFAULT;
 525}
 526EXPORT_SYMBOL_GPL(acpi_get_psd_map);
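
/*
 * Example (illustrative sketch): a cpufreq driver's init path would
 * typically call this once per policy to discover domain sharing, e.g.:
 *
 *	ret = acpi_get_psd_map(policy->cpu, cpu_data);
 *	if (!ret)
 *		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
 */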
 527
 528static int register_pcc_channel(int pcc_ss_idx)
 529{
 530	struct pcc_mbox_chan *pcc_chan;
 531	u64 usecs_lat;
 532
 533	if (pcc_ss_idx >= 0) {
 534		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
 535
 536		if (IS_ERR(pcc_chan)) {
 537			pr_err("Failed to find PCC channel for subspace %d\n",
 538			       pcc_ss_idx);
 539			return -ENODEV;
 540		}
 541
 542		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
 543		/*
  544		 * pcc_chan->latency is just a nominal value. In reality
  545		 * the remote processor could be much slower to reply, so
  546		 * add an arbitrary amount of wait on top of the nominal latency.
 547		 */
 548		usecs_lat = NUM_RETRIES * pcc_chan->latency;
 549		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
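		/* e.g. (illustrative) a 100 us nominal latency gives a 50 ms deadline */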
 550		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
 551		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
 552		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
 553
 554		pcc_data[pcc_ss_idx]->pcc_comm_addr =
 555			acpi_os_ioremap(pcc_chan->shmem_base_addr,
 556					pcc_chan->shmem_size);
 557		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 558			pr_err("Failed to ioremap PCC comm region mem for %d\n",
 559			       pcc_ss_idx);
 560			return -ENOMEM;
 561		}
 562
 563		/* Set flag so that we don't come here for each CPU. */
 564		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 565	}
 566
 567	return 0;
 568}
 569
 570/**
 571 * cpc_ffh_supported() - check if FFH reading supported
 572 *
 573 * Check if the architecture has support for functional fixed hardware
 574 * read/write capability.
 575 *
 576 * Return: true for supported, false for not supported
 577 */
 578bool __weak cpc_ffh_supported(void)
 579{
 580	return false;
 581}
 582
 583/**
 584 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 585 *
  586 * Check if the architectural support for CPPC is present even
  587 * if the _OSC hasn't prescribed it.
 588 *
 589 * Return: true for supported, false for not supported
 590 */
 591bool __weak cpc_supported_by_cpu(void)
 592{
 593	return false;
 594}
 595
 596/**
 597 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 598 *
 599 * Check and allocate the cppc_pcc_data memory.
  600 * In some processor configurations it is possible that the same subspace
 601 * is shared between multiple CPUs. This is seen especially in CPUs
 602 * with hardware multi-threading support.
 603 *
 604 * Return: 0 for success, errno for failure
 605 */
 606static int pcc_data_alloc(int pcc_ss_id)
 607{
 608	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
 609		return -EINVAL;
 610
 611	if (pcc_data[pcc_ss_id]) {
 612		pcc_data[pcc_ss_id]->refcount++;
 613	} else {
 614		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
 615					      GFP_KERNEL);
 616		if (!pcc_data[pcc_ss_id])
 617			return -ENOMEM;
 618		pcc_data[pcc_ss_id]->refcount++;
 619	}
 620
 621	return 0;
 622}
 623
 624/*
 625 * An example CPC table looks like the following.
 626 *
 627 *  Name (_CPC, Package() {
 628 *      17,							// NumEntries
 629 *      1,							// Revision
 630 *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 631 *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 632 *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 633 *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 634 *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 635 *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 636 *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 637 *      ...
 638 *      ...
 639 *      ...
  640 *  })
 641 * Each Register() encodes how to access that specific register.
 642 * e.g. a sample PCC entry has the following encoding:
 643 *
 644 *  Register (
 645 *      PCC,	// AddressSpaceKeyword
 646 *      8,	// RegisterBitWidth
 647 *      8,	// RegisterBitOffset
 648 *      0x30,	// RegisterAddress
 649 *      9,	// AccessSize (subspace ID)
 650 *  )
 651 */
 652
 653#ifndef arch_init_invariance_cppc
 654static inline void arch_init_invariance_cppc(void) { }
 655#endif
 656
 657/**
 658 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 659 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 660 *
 661 *	Return: 0 for success or negative value for err.
 662 */
 663int acpi_cppc_processor_probe(struct acpi_processor *pr)
 664{
 665	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 666	union acpi_object *out_obj, *cpc_obj;
 667	struct cpc_desc *cpc_ptr;
 668	struct cpc_reg *gas_t;
 669	struct device *cpu_dev;
 670	acpi_handle handle = pr->handle;
 671	unsigned int num_ent, i, cpc_rev;
 672	int pcc_subspace_id = -1;
 673	acpi_status status;
 674	int ret = -ENODATA;
 675
 676	if (!osc_sb_cppc2_support_acked) {
 677		pr_debug("CPPC v2 _OSC not acked\n");
 678		if (!cpc_supported_by_cpu())
 679			return -ENODEV;
 680	}
 681
 682	/* Parse the ACPI _CPC table for this CPU. */
 683	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 684			ACPI_TYPE_PACKAGE);
 685	if (ACPI_FAILURE(status)) {
 686		ret = -ENODEV;
 687		goto out_buf_free;
 688	}
 689
 690	out_obj = (union acpi_object *) output.pointer;
 691
 692	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
 693	if (!cpc_ptr) {
 694		ret = -ENOMEM;
 695		goto out_buf_free;
 696	}
 697
 698	/* First entry is NumEntries. */
 699	cpc_obj = &out_obj->package.elements[0];
 700	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 701		num_ent = cpc_obj->integer.value;
 702		if (num_ent <= 1) {
 703			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
 704				 num_ent, pr->id);
 705			goto out_free;
 706		}
 707	} else {
 708		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
 709			 cpc_obj->type, pr->id);
 710		goto out_free;
 711	}
 712
 713	/* Second entry should be revision. */
 714	cpc_obj = &out_obj->package.elements[1];
 715	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 716		cpc_rev = cpc_obj->integer.value;
 717	} else {
 718		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
 719			 cpc_obj->type, pr->id);
 720		goto out_free;
 721	}
 722
 723	if (cpc_rev < CPPC_V2_REV) {
 724		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
 725			 pr->id);
 726		goto out_free;
 727	}
 728
 729	/*
  730	 * Disregard _CPC if the number of entries in the return package is not
  731	 * as expected, but support future revisions being proper supersets of
  732	 * v3 and only causing more entries to be returned by _CPC.
 733	 */
 734	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
 735	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
 736	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
 737		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
 738			 num_ent, pr->id);
 739		goto out_free;
 740	}
 741	if (cpc_rev > CPPC_V3_REV) {
 742		num_ent = CPPC_V3_NUM_ENT;
 743		cpc_rev = CPPC_V3_REV;
 744	}
 745
 746	cpc_ptr->num_entries = num_ent;
 747	cpc_ptr->version = cpc_rev;
 748
 749	/* Iterate through remaining entries in _CPC */
 750	for (i = 2; i < num_ent; i++) {
 751		cpc_obj = &out_obj->package.elements[i];
 752
 753		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 754			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
 755			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
 756		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
 757			gas_t = (struct cpc_reg *)
 758				cpc_obj->buffer.pointer;
 759
 760			/*
 761			 * The PCC Subspace index is encoded inside
 762			 * the CPC table entries. The same PCC index
 763			 * will be used for all the PCC entries,
 764			 * so extract it only once.
 765			 */
 766			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 767				if (pcc_subspace_id < 0) {
 768					pcc_subspace_id = gas_t->access_width;
 769					if (pcc_data_alloc(pcc_subspace_id))
 770						goto out_free;
 771				} else if (pcc_subspace_id != gas_t->access_width) {
 772					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
 773						 pr->id);
 774					goto out_free;
 775				}
 776			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 777				if (gas_t->address) {
 778					void __iomem *addr;
 779
 780					if (!osc_cpc_flexible_adr_space_confirmed) {
 781						pr_debug("Flexible address space capability not supported\n");
 782						if (!cpc_supported_by_cpu())
 783							goto out_free;
 784					}
 785
 786					addr = ioremap(gas_t->address, gas_t->bit_width/8);
 787					if (!addr)
 788						goto out_free;
 789					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
 790				}
 791			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
 792				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
 793					/*
 794					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
 795					 * SystemIO doesn't implement 64-bit
 796					 * registers.
 797					 */
 798					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
 799						 gas_t->access_width);
 800					goto out_free;
 801				}
 802				if (gas_t->address & OVER_16BTS_MASK) {
 803					/* SystemIO registers use 16-bit integer addresses */
 804					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
 805						 gas_t->address);
 806					goto out_free;
 807				}
 808				if (!osc_cpc_flexible_adr_space_confirmed) {
 809					pr_debug("Flexible address space capability not supported\n");
 810					if (!cpc_supported_by_cpu())
 811						goto out_free;
 812				}
 813			} else {
 814				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
 815					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
 816					pr_debug("Unsupported register type (%d) in _CPC\n",
 817						 gas_t->space_id);
 818					goto out_free;
 819				}
 820			}
 821
 822			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
 823			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
 824		} else {
  825			pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
  826				 cpc_obj->type, pr->id);
 827			goto out_free;
 828		}
 829	}
 830	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 831
 832	/*
 833	 * Initialize the remaining cpc_regs as unsupported.
 834	 * Example: In case FW exposes CPPC v2, the below loop will initialize
 835	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
 836	 */
 837	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
 838		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
 839		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
 840	}
 841
 842
 843	/* Store CPU Logical ID */
 844	cpc_ptr->cpu_id = pr->id;
 845
 846	/* Parse PSD data for this CPU */
 847	ret = acpi_get_psd(cpc_ptr, handle);
 848	if (ret)
 849		goto out_free;
 850
  851	/* Register the PCC channel only once per PCC subspace ID. */
 852	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 853		ret = register_pcc_channel(pcc_subspace_id);
 854		if (ret)
 855			goto out_free;
 856
 857		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
 858		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 859	}
 860
 861	/* Everything looks okay */
 862	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 863
 864	/* Add per logical CPU nodes for reading its feedback counters. */
 865	cpu_dev = get_cpu_device(pr->id);
 866	if (!cpu_dev) {
 867		ret = -EINVAL;
 868		goto out_free;
 869	}
 870
 871	/* Plug PSD data into this CPU's CPC descriptor. */
 872	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 873
 874	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 875			"acpi_cppc");
 876	if (ret) {
 877		per_cpu(cpc_desc_ptr, pr->id) = NULL;
 878		kobject_put(&cpc_ptr->kobj);
 879		goto out_free;
 880	}
 881
 882	arch_init_invariance_cppc();
 883
 884	kfree(output.pointer);
 885	return 0;
 886
 887out_free:
 888	/* Free all the mapped sys mem areas for this CPU */
 889	for (i = 2; i < cpc_ptr->num_entries; i++) {
 890		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 891
 892		if (addr)
 893			iounmap(addr);
 894	}
 895	kfree(cpc_ptr);
 896
 897out_buf_free:
 898	kfree(output.pointer);
 899	return ret;
 900}
 901EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 902
 903/**
 904 * acpi_cppc_processor_exit - Cleanup CPC structs.
 905 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 906 *
 907 * Return: Void
 908 */
 909void acpi_cppc_processor_exit(struct acpi_processor *pr)
 910{
 911	struct cpc_desc *cpc_ptr;
 912	unsigned int i;
 913	void __iomem *addr;
 914	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
 915
 916	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
 917		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
 918			pcc_data[pcc_ss_id]->refcount--;
 919			if (!pcc_data[pcc_ss_id]->refcount) {
 920				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
 921				kfree(pcc_data[pcc_ss_id]);
 922				pcc_data[pcc_ss_id] = NULL;
 923			}
 924		}
 925	}
 926
 927	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 928	if (!cpc_ptr)
 929		return;
 930
 931	/* Free all the mapped sys mem areas for this CPU */
 932	for (i = 2; i < cpc_ptr->num_entries; i++) {
 933		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 934		if (addr)
 935			iounmap(addr);
 936	}
 937
 938	kobject_put(&cpc_ptr->kobj);
 939	kfree(cpc_ptr);
 940}
 941EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 942
 943/**
 944 * cpc_read_ffh() - Read FFH register
 945 * @cpunum:	CPU number to read
 946 * @reg:	cppc register information
 947 * @val:	place holder for return value
 948 *
 949 * Read bit_width bits from a specified address and bit_offset
 950 *
  951 * Return: 0 for success, or an error code otherwise.
 952 */
 953int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 954{
 955	return -ENOTSUPP;
 956}
 957
 958/**
 959 * cpc_write_ffh() - Write FFH register
 960 * @cpunum:	CPU number to write
 961 * @reg:	cppc register information
 962 * @val:	value to write
 963 *
 964 * Write value of bit_width bits to a specified address and bit_offset
 965 *
  966 * Return: 0 for success, or an error code otherwise.
 967 */
 968int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 969{
 970	return -ENOTSUPP;
 971}
 972
 973/*
  974 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
  975 * should be as fast as possible. We have already mapped the PCC subspace
  976 * during init, so we can directly write to it.
 977 */
 978
 979static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 980{
 981	void __iomem *vaddr = NULL;
 982	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 983	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 984
 985	if (reg_res->type == ACPI_TYPE_INTEGER) {
 986		*val = reg_res->cpc_entry.int_value;
 987		return 0;
 988	}
 989
 990	*val = 0;
 991
 992	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
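		/* access_width 1/2/3 maps to an 8/16/32-bit port access */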
 993		u32 width = 8 << (reg->access_width - 1);
 994		u32 val_u32;
 995		acpi_status status;
 996
 997		status = acpi_os_read_port((acpi_io_address)reg->address,
 998					   &val_u32, width);
 999		if (ACPI_FAILURE(status)) {
1000			pr_debug("Error: Failed to read SystemIO port %llx\n",
1001				 reg->address);
1002			return -EFAULT;
1003		}
1004
1005		*val = val_u32;
1006		return 0;
1007	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1008		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1009	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1010		vaddr = reg_res->sys_mem_vaddr;
1011	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1012		return cpc_read_ffh(cpu, reg, val);
1013	else
1014		return acpi_os_read_memory((acpi_physical_address)reg->address,
1015				val, reg->bit_width);
1016
1017	switch (reg->bit_width) {
1018	case 8:
1019		*val = readb_relaxed(vaddr);
1020		break;
1021	case 16:
1022		*val = readw_relaxed(vaddr);
1023		break;
1024	case 32:
1025		*val = readl_relaxed(vaddr);
1026		break;
1027	case 64:
1028		*val = readq_relaxed(vaddr);
1029		break;
1030	default:
1031		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1032			 reg->bit_width, pcc_ss_id);
1033		return -EFAULT;
1034	}
1035
1036	return 0;
1037}
1038
1039static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1040{
1041	int ret_val = 0;
1042	void __iomem *vaddr = NULL;
1043	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1044	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1045
1046	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
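		/* access_width 1/2/3 maps to an 8/16/32-bit port access */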
1047		u32 width = 8 << (reg->access_width - 1);
1048		acpi_status status;
1049
1050		status = acpi_os_write_port((acpi_io_address)reg->address,
1051					    (u32)val, width);
1052		if (ACPI_FAILURE(status)) {
1053			pr_debug("Error: Failed to write SystemIO port %llx\n",
1054				 reg->address);
1055			return -EFAULT;
1056		}
1057
1058		return 0;
1059	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1060		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1061	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1062		vaddr = reg_res->sys_mem_vaddr;
1063	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1064		return cpc_write_ffh(cpu, reg, val);
1065	else
1066		return acpi_os_write_memory((acpi_physical_address)reg->address,
1067				val, reg->bit_width);
1068
1069	switch (reg->bit_width) {
1070	case 8:
1071		writeb_relaxed(val, vaddr);
1072		break;
1073	case 16:
1074		writew_relaxed(val, vaddr);
1075		break;
1076	case 32:
1077		writel_relaxed(val, vaddr);
1078		break;
1079	case 64:
1080		writeq_relaxed(val, vaddr);
1081		break;
1082	default:
1083		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1084			 reg->bit_width, pcc_ss_id);
1085		ret_val = -EFAULT;
1086		break;
1087	}
1088
1089	return ret_val;
1090}
1091
1092static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
1093{
1094	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1095	struct cpc_register_resource *reg;
1096
1097	if (!cpc_desc) {
1098		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1099		return -ENODEV;
1100	}
1101
1102	reg = &cpc_desc->cpc_regs[reg_idx];
1103
1104	if (CPC_IN_PCC(reg)) {
1105		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1106		struct cppc_pcc_data *pcc_ss_data = NULL;
1107		int ret = 0;
1108
1109		if (pcc_ss_id < 0)
1110			return -EIO;
1111
1112		pcc_ss_data = pcc_data[pcc_ss_id];
1113
1114		down_write(&pcc_ss_data->pcc_lock);
1115
1116		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1117			cpc_read(cpunum, reg, perf);
1118		else
1119			ret = -EIO;
1120
1121		up_write(&pcc_ss_data->pcc_lock);
1122
1123		return ret;
1124	}
1125
1126	cpc_read(cpunum, reg, perf);
1127
1128	return 0;
1129}
1130
1131/**
1132 * cppc_get_desired_perf - Get the desired performance register value.
1133 * @cpunum: CPU from which to get desired performance.
1134 * @desired_perf: Return address.
1135 *
1136 * Return: 0 for success, -EIO otherwise.
1137 */
1138int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1139{
1140	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
1141}
1142EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1143
1144/**
1145 * cppc_get_nominal_perf - Get the nominal performance register value.
1146 * @cpunum: CPU from which to get nominal performance.
1147 * @nominal_perf: Return address.
1148 *
1149 * Return: 0 for success, -EIO otherwise.
1150 */
1151int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
1152{
1153	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
1154}
1155
1156/**
1157 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1158 * @cpunum: CPU from which to get capabilities info.
1159 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1160 *
1161 * Return: 0 for success with perf_caps populated else -ERRNO.
1162 */
1163int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1164{
1165	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1166	struct cpc_register_resource *highest_reg, *lowest_reg,
1167		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1168		*low_freq_reg = NULL, *nom_freq_reg = NULL;
1169	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1170	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1171	struct cppc_pcc_data *pcc_ss_data = NULL;
1172	int ret = 0, regs_in_pcc = 0;
1173
1174	if (!cpc_desc) {
1175		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1176		return -ENODEV;
1177	}
1178
1179	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1180	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1181	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1182	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1183	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1184	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1185	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1186
 1187	/* Are any of the regs PCC? */
1188	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1189		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1190		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1191		if (pcc_ss_id < 0) {
1192			pr_debug("Invalid pcc_ss_id\n");
1193			return -ENODEV;
1194		}
1195		pcc_ss_data = pcc_data[pcc_ss_id];
1196		regs_in_pcc = 1;
1197		down_write(&pcc_ss_data->pcc_lock);
1198		/* Ring doorbell once to update PCC subspace */
1199		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1200			ret = -EIO;
1201			goto out_err;
1202		}
1203	}
1204
1205	cpc_read(cpunum, highest_reg, &high);
1206	perf_caps->highest_perf = high;
1207
1208	cpc_read(cpunum, lowest_reg, &low);
1209	perf_caps->lowest_perf = low;
1210
1211	cpc_read(cpunum, nominal_reg, &nom);
1212	perf_caps->nominal_perf = nom;
1213
1214	if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1215	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1216		perf_caps->guaranteed_perf = 0;
1217	} else {
1218		cpc_read(cpunum, guaranteed_reg, &guaranteed);
1219		perf_caps->guaranteed_perf = guaranteed;
1220	}
1221
1222	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1223	perf_caps->lowest_nonlinear_perf = min_nonlinear;
1224
1225	if (!high || !low || !nom || !min_nonlinear)
1226		ret = -EFAULT;
1227
1228	/* Read optional lowest and nominal frequencies if present */
1229	if (CPC_SUPPORTED(low_freq_reg))
1230		cpc_read(cpunum, low_freq_reg, &low_f);
1231
1232	if (CPC_SUPPORTED(nom_freq_reg))
1233		cpc_read(cpunum, nom_freq_reg, &nom_f);
1234
1235	perf_caps->lowest_freq = low_f;
1236	perf_caps->nominal_freq = nom_f;
1237
1238
1239out_err:
1240	if (regs_in_pcc)
1241		up_write(&pcc_ss_data->pcc_lock);
1242	return ret;
1243}
1244EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
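
/*
 * Example (illustrative sketch): a typical caller reads the capabilities
 * once at init and derives its operating range from them, e.g.:
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps))
 *		pr_debug("cpu%d perf range [%u..%u], nominal %u\n", cpu,
 *			 caps.lowest_perf, caps.highest_perf, caps.nominal_perf);
 */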
1245
1246/**
1247 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
1248 *
1249 * CPPC has flexibility about how CPU performance counters are accessed.
1250 * One of the choices is PCC regions, which can have a high access latency. This
1251 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
1252 *
1253 * Return: true if any of the counters are in PCC regions, false otherwise
1254 */
1255bool cppc_perf_ctrs_in_pcc(void)
1256{
1257	int cpu;
1258
1259	for_each_present_cpu(cpu) {
1260		struct cpc_register_resource *ref_perf_reg;
1261		struct cpc_desc *cpc_desc;
1262
1263		cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1264
1265		if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
1266		    CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
1267		    CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
1268			return true;
1269
1270
1271		ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1272
1273		/*
 1274		 * If the reference perf register is not supported then we should
 1275		 * use the nominal perf value.
1276		 */
1277		if (!CPC_SUPPORTED(ref_perf_reg))
1278			ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1279
1280		if (CPC_IN_PCC(ref_perf_reg))
1281			return true;
1282	}
1283
1284	return false;
1285}
1286EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
1287
1288/**
1289 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1290 * @cpunum: CPU from which to read counters.
1291 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1292 *
1293 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1294 */
1295int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1296{
1297	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1298	struct cpc_register_resource *delivered_reg, *reference_reg,
1299		*ref_perf_reg, *ctr_wrap_reg;
1300	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1301	struct cppc_pcc_data *pcc_ss_data = NULL;
1302	u64 delivered, reference, ref_perf, ctr_wrap_time;
1303	int ret = 0, regs_in_pcc = 0;
1304
1305	if (!cpc_desc) {
1306		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1307		return -ENODEV;
1308	}
1309
1310	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1311	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1312	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1313	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1314
1315	/*
 1316	 * If the reference perf register is not supported then we should
 1317	 * use the nominal perf value.
1318	 */
1319	if (!CPC_SUPPORTED(ref_perf_reg))
1320		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1321
 1322	/* Are any of the regs PCC? */
1323	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1324		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1325		if (pcc_ss_id < 0) {
1326			pr_debug("Invalid pcc_ss_id\n");
1327			return -ENODEV;
1328		}
1329		pcc_ss_data = pcc_data[pcc_ss_id];
1330		down_write(&pcc_ss_data->pcc_lock);
1331		regs_in_pcc = 1;
1332		/* Ring doorbell once to update PCC subspace */
1333		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1334			ret = -EIO;
1335			goto out_err;
1336		}
1337	}
1338
1339	cpc_read(cpunum, delivered_reg, &delivered);
1340	cpc_read(cpunum, reference_reg, &reference);
1341	cpc_read(cpunum, ref_perf_reg, &ref_perf);
1342
1343	/*
 1344	 * Per the spec, if the optional ctr_wrap_time register is unsupported,
 1345	 * then the performance counters are assumed to never wrap during the
 1346	 * lifetime of the platform.
1347	 */
1348	ctr_wrap_time = (u64)(~((u64)0));
1349	if (CPC_SUPPORTED(ctr_wrap_reg))
1350		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1351
 1352	if (!delivered || !reference || !ref_perf) {
1353		ret = -EFAULT;
1354		goto out_err;
1355	}
1356
1357	perf_fb_ctrs->delivered = delivered;
1358	perf_fb_ctrs->reference = reference;
1359	perf_fb_ctrs->reference_perf = ref_perf;
1360	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1361out_err:
1362	if (regs_in_pcc)
1363		up_write(&pcc_ss_data->pcc_lock);
1364	return ret;
1365}
1366EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
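
/*
 * Example (illustrative sketch): callers typically sample the feedback
 * counters twice and derive the average delivered performance from the
 * deltas, e.g.:
 *
 *	struct cppc_perf_fb_ctrs t0, t1;
 *	u64 dref, ddel;
 *
 *	cppc_get_perf_ctrs(cpu, &t0);
 *	udelay(2);
 *	cppc_get_perf_ctrs(cpu, &t1);
 *	dref = t1.reference - t0.reference;
 *	ddel = t1.delivered - t0.delivered;
 *	if (dref)
 *		delivered_perf = t0.reference_perf * ddel / dref;
 */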
1367
1368/**
 1369 * cppc_set_enable - Enable or disable CPPC on the processor by writing the
1370 * Continuous Performance Control package EnableRegister field.
1371 * @cpu: CPU for which to enable CPPC register.
1372 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
1373 *
1374 * Return: 0 for success, -ERRNO or -EIO otherwise.
1375 */
1376int cppc_set_enable(int cpu, bool enable)
1377{
1378	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1379	struct cpc_register_resource *enable_reg;
1380	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1381	struct cppc_pcc_data *pcc_ss_data = NULL;
1382	int ret = -EINVAL;
1383
1384	if (!cpc_desc) {
1385		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1386		return -EINVAL;
1387	}
1388
1389	enable_reg = &cpc_desc->cpc_regs[ENABLE];
1390
1391	if (CPC_IN_PCC(enable_reg)) {
1392
1393		if (pcc_ss_id < 0)
1394			return -EIO;
1395
1396		ret = cpc_write(cpu, enable_reg, enable);
1397		if (ret)
1398			return ret;
1399
1400		pcc_ss_data = pcc_data[pcc_ss_id];
1401
1402		down_write(&pcc_ss_data->pcc_lock);
 1403		/* After writing CPC, transfer the ownership of PCC to the platform. */
1404		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1405		up_write(&pcc_ss_data->pcc_lock);
1406		return ret;
1407	}
1408
1409	return cpc_write(cpu, enable_reg, enable);
1410}
1411EXPORT_SYMBOL_GPL(cppc_set_enable);
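
/*
 * Example (illustrative): a driver would typically enable CPPC once per CPU
 * during init with cppc_set_enable(cpu, true) and treat a non-zero return
 * value as CPPC being unusable on that CPU.
 */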
1412
1413/**
1414 * cppc_set_perf - Set a CPU's performance controls.
1415 * @cpu: CPU for which to set performance controls.
1416 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1417 *
1418 * Return: 0 for success, -ERRNO otherwise.
1419 */
1420int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1421{
1422	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1423	struct cpc_register_resource *desired_reg;
1424	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1425	struct cppc_pcc_data *pcc_ss_data = NULL;
1426	int ret = 0;
1427
1428	if (!cpc_desc) {
1429		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1430		return -ENODEV;
1431	}
1432
1433	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1434
1435	/*
1436	 * This is Phase-I where we want to write to CPC registers
1437	 * -> We want all CPUs to be able to execute this phase in parallel
1438	 *
 1439	 * Since the read_lock can be acquired by multiple CPUs simultaneously,
 1440	 * we achieve that goal here.
1441	 */
1442	if (CPC_IN_PCC(desired_reg)) {
1443		if (pcc_ss_id < 0) {
1444			pr_debug("Invalid pcc_ss_id\n");
1445			return -ENODEV;
1446		}
1447		pcc_ss_data = pcc_data[pcc_ss_id];
1448		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1449		if (pcc_ss_data->platform_owns_pcc) {
1450			ret = check_pcc_chan(pcc_ss_id, false);
1451			if (ret) {
1452				up_read(&pcc_ss_data->pcc_lock);
1453				return ret;
1454			}
1455		}
1456		/*
1457		 * Update the pending_write to make sure a PCC CMD_READ will not
1458		 * arrive and steal the channel during the switch to write lock
1459		 */
1460		pcc_ss_data->pending_pcc_write_cmd = true;
1461		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1462		cpc_desc->write_cmd_status = 0;
1463	}
1464
1465	/*
1466	 * Skip writing MIN/MAX until Linux knows how to come up with
1467	 * useful values.
1468	 */
1469	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1470
1471	if (CPC_IN_PCC(desired_reg))
1472		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
1473	/*
1474	 * This is Phase-II where we transfer the ownership of PCC to Platform
1475	 *
 1476	 * Short summary: if we think of a group of cppc_set_perf requests
 1477	 * that happened in a short overlapping interval, the last CPU to come
 1478	 * out of Phase-I will enter Phase-II and ring the doorbell.
1479	 *
1480	 * We have the following requirements for Phase-II:
1481	 *     1. We want to execute Phase-II only when there are no CPUs
1482	 * currently executing in Phase-I
1483	 *     2. Once we start Phase-II we want to avoid all other CPUs from
1484	 * entering Phase-I.
1485	 *     3. We want only one CPU among all those who went through Phase-I
1486	 * to run phase-II
1487	 *
1488	 * If write_trylock fails to get the lock and doesn't transfer the
1489	 * PCC ownership to the platform, then one of the following will be TRUE
1490	 *     1. There is at-least one CPU in Phase-I which will later execute
1491	 * write_trylock, so the CPUs in Phase-I will be responsible for
1492	 * executing the Phase-II.
1493	 *     2. Some other CPU has beaten this CPU to successfully execute the
1494	 * write_trylock and has already acquired the write_lock. We know for a
1495	 * fact it (other CPU acquiring the write_lock) couldn't have happened
1496	 * before this CPU's Phase-I as we held the read_lock.
1497	 *     3. Some other CPU executing pcc CMD_READ has stolen the
1498	 * down_write, in which case, send_pcc_cmd will check for pending
1499	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
 1500	 * So this CPU can be certain that its request will be delivered.
 1501	 *    So in all cases, this CPU knows that its request will be delivered
 1502	 * by another CPU and can return.
1503	 *
1504	 * After getting the down_write we still need to check for
1505	 * pending_pcc_write_cmd to take care of the following scenario
1506	 *    The thread running this code could be scheduled out between
1507	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1508	 * could have delivered the request to Platform by triggering the
1509	 * doorbell and transferred the ownership of PCC to platform. So this
1510	 * avoids triggering an unnecessary doorbell and more importantly before
1511	 * triggering the doorbell it makes sure that the PCC channel ownership
1512	 * is still with OSPM.
1513	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1514	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
1515	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1516	 * case during a CMD_READ and if there are pending writes it delivers
1517	 * the write command before servicing the read command
1518	 */
1519	if (CPC_IN_PCC(desired_reg)) {
1520		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1521			/* Update only if there are pending write commands */
1522			if (pcc_ss_data->pending_pcc_write_cmd)
1523				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1524			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
1525		} else
1526			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1527			wait_event(pcc_ss_data->pcc_write_wait_q,
1528				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1529
1530		/* send_pcc_cmd updates the status in case of failure */
1531		ret = cpc_desc->write_cmd_status;
1532	}
1533	return ret;
1534}
1535EXPORT_SYMBOL_GPL(cppc_set_perf);
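
/*
 * Example (illustrative sketch): a governor-driven performance change boils
 * down to filling in the desired performance level and calling this, e.g.:
 *
 *	struct cppc_perf_ctrls ctrls = { .desired_perf = target_perf };
 *
 *	ret = cppc_set_perf(cpu, &ctrls);
 */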
1536
1537/**
1538 * cppc_get_transition_latency - returns frequency transition latency in ns
1539 *
1540 * ACPI CPPC does not explicitly specify how a platform can specify the
1541 * transition latency for performance change requests. The closest we have
1542 * is the timing information from the PCCT tables which provides the info
1543 * on the number and frequency of PCC commands the platform can handle.
1544 *
1545 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
1546 * then assume there is no latency.
1547 */
1548unsigned int cppc_get_transition_latency(int cpu_num)
1549{
1550	/*
1551	 * Expected transition latency is based on the PCCT timing values
 1552	 * Below are the definitions from the ACPI spec:
1553	 * pcc_nominal- Expected latency to process a command, in microseconds
1554	 * pcc_mpar   - The maximum number of periodic requests that the subspace
1555	 *              channel can support, reported in commands per minute. 0
1556	 *              indicates no limitation.
1557	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1558	 *              completion of a command before issuing the next command,
1559	 *              in microseconds.
1560	 */
1561	unsigned int latency_ns = 0;
1562	struct cpc_desc *cpc_desc;
1563	struct cpc_register_resource *desired_reg;
1564	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1565	struct cppc_pcc_data *pcc_ss_data;
1566
1567	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1568	if (!cpc_desc)
1569		return CPUFREQ_ETERNAL;
1570
1571	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1572	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
1573		return 0;
1574	else if (!CPC_IN_PCC(desired_reg))
1575		return CPUFREQ_ETERNAL;
1576
1577	if (pcc_ss_id < 0)
1578		return CPUFREQ_ETERNAL;
1579
1580	pcc_ss_data = pcc_data[pcc_ss_id];
1581	if (pcc_ss_data->pcc_mpar)
1582		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
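	/*
	 * e.g. (illustrative) pcc_mpar == 6000 commands/min allows one command
	 * every 10 ms, so latency_ns comes out at roughly 10,000,000.
	 */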
1583
1584	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1585	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1586
1587	return latency_ns;
1588}
1589EXPORT_SYMBOL_GPL(cppc_get_transition_latency);