v5.9 (drivers/acpi/cppc_acpi.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
   4 *
   5 * (C) Copyright 2014, 2015 Linaro Ltd.
   6 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
   7 *
   8 * CPPC describes a few methods for controlling CPU performance using
   9 * information from a per CPU table called CPC. This table is described in
  10 * the ACPI v5.0+ specification. The table consists of a list of
  11 * registers which may be memory mapped or hardware registers and also may
  12 * include some static integer values.
  13 *
   14 * CPU performance is on an abstract continuous scale, as opposed to a discretized
  15 * P-state scale which is tied to CPU frequency only. In brief, the basic
  16 * operation involves:
  17 *
  18 * - OS makes a CPU performance request. (Can provide min and max bounds)
  19 *
  20 * - Platform (such as BMC) is free to optimize request within requested bounds
  21 *   depending on power/thermal budgets etc.
  22 *
  23 * - Platform conveys its decision back to OS
  24 *
  25 * The communication between OS and platform occurs through another medium
   26 * called the Platform Communication Channel (PCC). This is a generic mailbox-like
  27 * mechanism which includes doorbell semantics to indicate register updates.
  28 * See drivers/mailbox/pcc.c for details on PCC.
  29 *
  30 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  31 * above specifications.
  32 */
  33
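/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * file): a CPUfreq driver built on the helpers below typically reads a
 * CPU's capabilities once, then requests a desired performance level:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */
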
  34#define pr_fmt(fmt)	"ACPI CPPC: " fmt
  35
  36#include <linux/cpufreq.h>
  37#include <linux/delay.h>
  38#include <linux/iopoll.h>
  39#include <linux/ktime.h>
  40#include <linux/rwsem.h>
  41#include <linux/wait.h>
  42
  43#include <acpi/cppc_acpi.h>
  44
  45struct cppc_pcc_data {
  46	struct mbox_chan *pcc_channel;
  47	void __iomem *pcc_comm_addr;
  48	bool pcc_channel_acquired;
  49	unsigned int deadline_us;
  50	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  51
  52	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
  53	bool platform_owns_pcc;		/* Ownership of PCC subspace */
  54	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */
  55
  56	/*
  57	 * Lock to provide controlled access to the PCC channel.
  58	 *
   59	 * For performance critical usecases (currently cppc_set_perf)
  60	 *	We need to take read_lock and check if channel belongs to OSPM
  61	 * before reading or writing to PCC subspace
  62	 *	We need to take write_lock before transferring the channel
  63	 * ownership to the platform via a Doorbell
  64	 *	This allows us to batch a number of CPPC requests if they happen
   65	 * to originate at about the same time
  66	 *
   67	 * For non-performance critical usecases (init)
  68	 *	Take write_lock for all purposes which gives exclusive access
  69	 */
  70	struct rw_semaphore pcc_lock;
  71
  72	/* Wait queue for CPUs whose requests were batched */
  73	wait_queue_head_t pcc_write_wait_q;
  74	ktime_t last_cmd_cmpl_time;
  75	ktime_t last_mpar_reset;
  76	int mpar_count;
  77	int refcount;
  78};
  79
  80/* Array to represent the PCC channel per subspace ID */
  81static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  82/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
  83static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  84
  85/*
  86 * The cpc_desc structure contains the ACPI register details
  87 * as described in the per CPU _CPC tables. The details
  88 * include the type of register (e.g. PCC, System IO, FFH etc.)
  89 * and destination addresses which lets us READ/WRITE CPU performance
  90 * information using the appropriate I/O methods.
  91 */
  92static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  93
  94/* pcc mapped address + header size + offset within PCC subspace */
  95#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  96						0x8 + (offs))
  97
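/*
 * Example (editor's note): the 0x8 skips the generic PCC shared-memory
 * header (the 4-byte signature, 2-byte command and 2-byte status fields of
 * struct acpi_pcct_shared_memory), so a register at _CPC offset 0x10
 * resolves to pcc_comm_addr + 0x18.
 */
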
  98/* Check if a CPC register is in PCC */
  99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
 100				(cpc)->cpc_entry.reg.space_id ==	\
 101				ACPI_ADR_SPACE_PLATFORM_COMM)
 102
  103/* Evaluates to True if reg is a NULL register descriptor */
 104#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
 105				(reg)->address == 0 &&			\
 106				(reg)->bit_width == 0 &&		\
 107				(reg)->bit_offset == 0 &&		\
 108				(reg)->access_width == 0)
 109
  110/* Evaluates to True if an optional cpc field is supported */
 111#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
 112				!!(cpc)->cpc_entry.int_value :		\
 113				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
 114/*
 115 * Arbitrary Retries in case the remote processor is slow to respond
 116 * to PCC commands. Keeping it high enough to cover emulators where
 117 * the processors run painfully slow.
 118 */
 119#define NUM_RETRIES 500ULL
 120
 121struct cppc_attr {
 122	struct attribute attr;
 123	ssize_t (*show)(struct kobject *kobj,
 124			struct attribute *attr, char *buf);
 125	ssize_t (*store)(struct kobject *kobj,
 126			struct attribute *attr, const char *c, ssize_t count);
 127};
 128
 129#define define_one_cppc_ro(_name)		\
 130static struct cppc_attr _name =			\
 131__ATTR(_name, 0444, show_##_name, NULL)
 132
 133#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
 134
 135#define show_cppc_data(access_fn, struct_name, member_name)		\
 136	static ssize_t show_##member_name(struct kobject *kobj,		\
 137					struct attribute *attr,	char *buf) \
 138	{								\
 139		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
 140		struct struct_name st_name = {0};			\
 141		int ret;						\
 142									\
 143		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
 144		if (ret)						\
 145			return ret;					\
 146									\
 147		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
 148				(u64)st_name.member_name);		\
 149	}								\
 150	define_one_cppc_ro(member_name)
 151
 152show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 153show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 154show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 155show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
 156show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
 157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
 158
 159show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 160show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 161
 162static ssize_t show_feedback_ctrs(struct kobject *kobj,
 163		struct attribute *attr, char *buf)
 164{
 165	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 166	struct cppc_perf_fb_ctrs fb_ctrs = {0};
 167	int ret;
 168
 169	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
 170	if (ret)
 171		return ret;
 172
 173	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
 174			fb_ctrs.reference, fb_ctrs.delivered);
 175}
 176define_one_cppc_ro(feedback_ctrs);
 177
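/*
 * Editor's note: combined with the kobject registered under each CPU device
 * in acpi_cppc_processor_probe(), the attributes above appear as read-only
 * sysfs files, e.g. /sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf and
 * /sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs.
 */
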
 178static struct attribute *cppc_attrs[] = {
 179	&feedback_ctrs.attr,
 180	&reference_perf.attr,
 181	&wraparound_time.attr,
 182	&highest_perf.attr,
 183	&lowest_perf.attr,
 184	&lowest_nonlinear_perf.attr,
 185	&nominal_perf.attr,
 186	&nominal_freq.attr,
 187	&lowest_freq.attr,
 188	NULL
 189};
 190
 191static struct kobj_type cppc_ktype = {
 192	.sysfs_ops = &kobj_sysfs_ops,
 193	.default_attrs = cppc_attrs,
 194};
 195
 196static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 197{
 198	int ret, status;
 199	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 200	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 201		pcc_ss_data->pcc_comm_addr;
 202
 203	if (!pcc_ss_data->platform_owns_pcc)
 204		return 0;
 205
 206	/*
  207	 * Poll the PCC status register every 3 us (delay_us) for a maximum of
  208	 * deadline_us (timeout_us) until the PCC command complete bit is set (cond)
 209	 */
 210	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
 211					status & PCC_CMD_COMPLETE_MASK, 3,
 212					pcc_ss_data->deadline_us);
 213
 214	if (likely(!ret)) {
 215		pcc_ss_data->platform_owns_pcc = false;
 216		if (chk_err_bit && (status & PCC_ERROR_MASK))
 217			ret = -EIO;
 218	}
 219
 220	if (unlikely(ret))
 221		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
 222		       pcc_ss_id, ret);
 223
 224	return ret;
 225}
 226
 227/*
  228 * This function transfers the ownership of the PCC to the platform,
  229 * so it must be called while holding write_lock(pcc_lock)
 230 */
 231static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 232{
 233	int ret = -EIO, i;
 234	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 235	struct acpi_pcct_shared_memory *generic_comm_base =
 236		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
 237	unsigned int time_delta;
 238
 239	/*
 240	 * For CMD_WRITE we know for a fact the caller should have checked
 241	 * the channel before writing to PCC space
 242	 */
 243	if (cmd == CMD_READ) {
 244		/*
 245		 * If there are pending cpc_writes, then we stole the channel
 246		 * before write completion, so first send a WRITE command to
 247		 * platform
 248		 */
 249		if (pcc_ss_data->pending_pcc_write_cmd)
 250			send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 251
 252		ret = check_pcc_chan(pcc_ss_id, false);
 253		if (ret)
 254			goto end;
 255	} else /* CMD_WRITE */
 256		pcc_ss_data->pending_pcc_write_cmd = FALSE;
 257
 258	/*
 259	 * Handle the Minimum Request Turnaround Time(MRTT)
 260	 * "The minimum amount of time that OSPM must wait after the completion
 261	 * of a command before issuing the next command, in microseconds"
 262	 */
 263	if (pcc_ss_data->pcc_mrtt) {
 264		time_delta = ktime_us_delta(ktime_get(),
 265					    pcc_ss_data->last_cmd_cmpl_time);
 266		if (pcc_ss_data->pcc_mrtt > time_delta)
 267			udelay(pcc_ss_data->pcc_mrtt - time_delta);
 268	}
 269
 270	/*
 271	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
 272	 * "The maximum number of periodic requests that the subspace channel can
 273	 * support, reported in commands per minute. 0 indicates no limitation."
 274	 *
 275	 * This parameter should be ideally zero or large enough so that it can
 276	 * handle maximum number of requests that all the cores in the system can
 277	 * collectively generate. If it is not, we will follow the spec and just
 278	 * not send the request to the platform after hitting the MPAR limit in
 279	 * any 60s window
 280	 */
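	/*
	 * Worked example (editor's note, hypothetical numbers): with
	 * pcc_mpar = 6000, at most 6000 commands go out per 60 s window.
	 * Once mpar_count reaches zero inside a window, further requests
	 * fail with -EIO until last_mpar_reset is more than 60 s old.
	 */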
 281	if (pcc_ss_data->pcc_mpar) {
 282		if (pcc_ss_data->mpar_count == 0) {
 283			time_delta = ktime_ms_delta(ktime_get(),
 284						    pcc_ss_data->last_mpar_reset);
 285			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 286				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
 287					 pcc_ss_id);
 288				ret = -EIO;
 289				goto end;
 290			}
 291			pcc_ss_data->last_mpar_reset = ktime_get();
 292			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 293		}
 294		pcc_ss_data->mpar_count--;
 295	}
 296
 297	/* Write to the shared comm region. */
 298	writew_relaxed(cmd, &generic_comm_base->command);
 299
 300	/* Flip CMD COMPLETE bit */
 301	writew_relaxed(0, &generic_comm_base->status);
 302
 303	pcc_ss_data->platform_owns_pcc = true;
 304
 305	/* Ring doorbell */
 306	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
 307	if (ret < 0) {
 308		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
 309		       pcc_ss_id, cmd, ret);
 310		goto end;
 311	}
 312
  313	/* Wait for completion and check for the PCC error bit */
 314	ret = check_pcc_chan(pcc_ss_id, true);
 315
 316	if (pcc_ss_data->pcc_mrtt)
 317		pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 318
 319	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
 320		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
 321	else
 322		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 323
 324end:
 325	if (cmd == CMD_WRITE) {
 326		if (unlikely(ret)) {
 327			for_each_possible_cpu(i) {
 328				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
 329				if (!desc)
 330					continue;
 331
 332				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 333					desc->write_cmd_status = ret;
 334			}
 335		}
 336		pcc_ss_data->pcc_write_cnt++;
 337		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 338	}
 339
 340	return ret;
 341}
 342
 343static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 344{
 345	if (ret < 0)
 346		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 347				*(u16 *)msg, ret);
 348	else
 349		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
 350				*(u16 *)msg, ret);
 351}
 352
 353static struct mbox_client cppc_mbox_cl = {
 354	.tx_done = cppc_chan_tx_done,
 355	.knows_txdone = true,
 356};
 357
 358static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 359{
 360	int result = -EFAULT;
 361	acpi_status status = AE_OK;
 362	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 363	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
 364	struct acpi_buffer state = {0, NULL};
 365	union acpi_object  *psd = NULL;
 366	struct acpi_psd_package *pdomain;
 367
 368	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
 369					    &buffer, ACPI_TYPE_PACKAGE);
 370	if (status == AE_NOT_FOUND)	/* _PSD is optional */
 371		return 0;
 372	if (ACPI_FAILURE(status))
 373		return -ENODEV;
 374
 375	psd = buffer.pointer;
 376	if (!psd || psd->package.count != 1) {
 377		pr_debug("Invalid _PSD data\n");
 378		goto end;
 379	}
 380
 381	pdomain = &(cpc_ptr->domain_info);
 382
 383	state.length = sizeof(struct acpi_psd_package);
 384	state.pointer = pdomain;
 385
 386	status = acpi_extract_package(&(psd->package.elements[0]),
 387		&format, &state);
 388	if (ACPI_FAILURE(status)) {
 389		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
 390		goto end;
 391	}
 392
 393	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
 394		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
 395		goto end;
 396	}
 397
 398	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
 399		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
 400		goto end;
 401	}
 402
 403	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 404	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 405	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 406		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
 407		goto end;
 408	}
 409
 410	result = 0;
 411end:
 412	kfree(buffer.pointer);
 413	return result;
 414}
 415
 416/**
 417 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 418 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 419 *
 420 *	Return: 0 for success or negative value for err.
 421 */
 422int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 423{
 424	int count_target;
 425	int retval = 0;
 426	unsigned int i, j;
 427	cpumask_var_t covered_cpus;
 428	struct cppc_cpudata *pr, *match_pr;
 429	struct acpi_psd_package *pdomain;
 430	struct acpi_psd_package *match_pdomain;
 431	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
 432
 433	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 434		return -ENOMEM;
 435
 436	/*
 437	 * Now that we have _PSD data from all CPUs, let's setup P-state
 438	 * domain info.
 439	 */
 440	for_each_possible_cpu(i) {
 441		if (cpumask_test_cpu(i, covered_cpus))
 442			continue;
 443
 444		pr = all_cpu_data[i];
 445		cpc_ptr = per_cpu(cpc_desc_ptr, i);
 446		if (!cpc_ptr) {
 447			retval = -EFAULT;
 448			goto err_ret;
 449		}
 450
 451		pdomain = &(cpc_ptr->domain_info);
 452		cpumask_set_cpu(i, pr->shared_cpu_map);
 453		cpumask_set_cpu(i, covered_cpus);
 454		if (pdomain->num_processors <= 1)
 455			continue;
 456
 457		/* Validate the Domain info */
 458		count_target = pdomain->num_processors;
 459		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
 460			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 461		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
 462			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
 463		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
 464			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 465
 466		for_each_possible_cpu(j) {
 467			if (i == j)
 468				continue;
 469
 470			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 471			if (!match_cpc_ptr) {
 472				retval = -EFAULT;
 473				goto err_ret;
 474			}
 475
 476			match_pdomain = &(match_cpc_ptr->domain_info);
 477			if (match_pdomain->domain != pdomain->domain)
 478				continue;
 479
 480			/* Here i and j are in the same domain */
 481			if (match_pdomain->num_processors != count_target) {
 482				retval = -EFAULT;
 483				goto err_ret;
 484			}
 485
 486			if (pdomain->coord_type != match_pdomain->coord_type) {
 487				retval = -EFAULT;
 488				goto err_ret;
 489			}
 490
 491			cpumask_set_cpu(j, covered_cpus);
 492			cpumask_set_cpu(j, pr->shared_cpu_map);
 493		}
 494
 495		for_each_cpu(j, pr->shared_cpu_map) {
 496			if (i == j)
 497				continue;
 498
 499			match_pr = all_cpu_data[j];
 500			match_pr->shared_type = pr->shared_type;
 501			cpumask_copy(match_pr->shared_cpu_map,
 502				     pr->shared_cpu_map);
 503		}
 504	}
 505	goto out;
 506
 507err_ret:
 508	for_each_possible_cpu(i) {
 509		pr = all_cpu_data[i];
 510
 511		/* Assume no coordination on any error parsing domain info */
 512		cpumask_clear(pr->shared_cpu_map);
 513		cpumask_set_cpu(i, pr->shared_cpu_map);
 514		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 515	}
 516out:
 517	free_cpumask_var(covered_cpus);
 518	return retval;
 519}
 520EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 521
 522static int register_pcc_channel(int pcc_ss_idx)
 523{
 524	struct acpi_pcct_hw_reduced *cppc_ss;
 525	u64 usecs_lat;
 526
 527	if (pcc_ss_idx >= 0) {
 528		pcc_data[pcc_ss_idx]->pcc_channel =
 529			pcc_mbox_request_channel(&cppc_mbox_cl,	pcc_ss_idx);
 530
 531		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
 532			pr_err("Failed to find PCC channel for subspace %d\n",
 533			       pcc_ss_idx);
 534			return -ENODEV;
 535		}
 536
 537		/*
 538		 * The PCC mailbox controller driver should
 539		 * have parsed the PCCT (global table of all
 540		 * PCC channels) and stored pointers to the
 541		 * subspace communication region in con_priv.
 542		 */
 543		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 544
 545		if (!cppc_ss) {
 546			pr_err("No PCC subspace found for %d CPPC\n",
 547			       pcc_ss_idx);
 548			return -ENODEV;
 549		}
 550
 551		/*
 552		 * cppc_ss->latency is just a Nominal value. In reality
 553		 * the remote processor could be much slower to reply.
 554		 * So add an arbitrary amount of wait on top of Nominal.
 555		 */
 556		usecs_lat = NUM_RETRIES * cppc_ss->latency;
 557		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
 558		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
 559		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
 560		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
 561
 562		pcc_data[pcc_ss_idx]->pcc_comm_addr =
 563			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
 564		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 565			pr_err("Failed to ioremap PCC comm region mem for %d\n",
 566			       pcc_ss_idx);
 567			return -ENOMEM;
 568		}
 569
 570		/* Set flag so that we don't come here for each CPU. */
 571		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 572	}
 573
 574	return 0;
 575}
 576
 577/**
 578 * cpc_ffh_supported() - check if FFH reading supported
 579 *
 580 * Check if the architecture has support for functional fixed hardware
 581 * read/write capability.
 582 *
 583 * Return: true for supported, false for not supported
 584 */
 585bool __weak cpc_ffh_supported(void)
 586{
 587	return false;
 588}
 589
 590/**
 591 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 592 *
 593 * Check and allocate the cppc_pcc_data memory.
 594 * In some processor configurations it is possible that same subspace
 595 * is shared between multiple CPUs. This is seen especially in CPUs
 596 * with hardware multi-threading support.
 597 *
 598 * Return: 0 for success, errno for failure
 599 */
 600static int pcc_data_alloc(int pcc_ss_id)
 601{
 602	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
 603		return -EINVAL;
 604
 605	if (pcc_data[pcc_ss_id]) {
 606		pcc_data[pcc_ss_id]->refcount++;
 607	} else {
 608		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
 609					      GFP_KERNEL);
 610		if (!pcc_data[pcc_ss_id])
 611			return -ENOMEM;
 612		pcc_data[pcc_ss_id]->refcount++;
 613	}
 614
 615	return 0;
 616}
 617
 618/* Check if CPPC revision + num_ent combination is supported */
 619static bool is_cppc_supported(int revision, int num_ent)
 620{
 621	int expected_num_ent;
 622
 623	switch (revision) {
 624	case CPPC_V2_REV:
 625		expected_num_ent = CPPC_V2_NUM_ENT;
 626		break;
 627	case CPPC_V3_REV:
 628		expected_num_ent = CPPC_V3_NUM_ENT;
 629		break;
 630	default:
 631		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
 632			revision);
 633		return false;
 634	}
 635
 636	if (expected_num_ent != num_ent) {
 637		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
 638			num_ent, expected_num_ent, revision);
 639		return false;
 640	}
 641
 642	return true;
 643}
 644
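/*
 * Editor's note, based on the definitions in <acpi/cppc_acpi.h> at this
 * kernel version: CPPC_V2_REV pairs with CPPC_V2_NUM_ENT (21 entries) and
 * CPPC_V3_REV with CPPC_V3_NUM_ENT (23); v3 adds the lowest and nominal
 * frequency entries initialized near the end of acpi_cppc_processor_probe().
 */
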
 645/*
 646 * An example CPC table looks like the following.
 647 *
 648 *	Name(_CPC, Package()
 649 *			{
 650 *			17,
  651 *			// NumEntries
 652 *			1,
 653 *			// Revision
 654 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 655 *			// Highest Performance
 656 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 657 *			// Nominal Performance
 658 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 659 *			// Lowest Nonlinear Performance
 660 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 661 *			// Lowest Performance
 662 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 663 *			// Guaranteed Performance Register
 664 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 665 *			// Desired Performance Register
 666 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 667 *			..
 668 *			..
 669 *			..
 670 *
 671 *		}
 672 * Each Register() encodes how to access that specific register.
 673 * e.g. a sample PCC entry has the following encoding:
 674 *
 675 *	Register (
 676 *		PCC,
  677 *		// AddressSpaceKeyword
 678 *		8,
 679 *		//RegisterBitWidth
 680 *		8,
 681 *		//RegisterBitOffset
 682 *		0x30,
 683 *		//RegisterAddress
 684 *		9
 685 *		//AccessSize (subspace ID)
 686 *		0
 687 *		)
 688 *	}
 689 */
 690
 691/**
 692 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 693 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 694 *
 695 *	Return: 0 for success or negative value for err.
 696 */
 697int acpi_cppc_processor_probe(struct acpi_processor *pr)
 698{
 699	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 700	union acpi_object *out_obj, *cpc_obj;
 701	struct cpc_desc *cpc_ptr;
 702	struct cpc_reg *gas_t;
 703	struct device *cpu_dev;
 704	acpi_handle handle = pr->handle;
 705	unsigned int num_ent, i, cpc_rev;
 706	int pcc_subspace_id = -1;
 707	acpi_status status;
 708	int ret = -EFAULT;
 709
 710	/* Parse the ACPI _CPC table for this CPU. */
 711	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 712			ACPI_TYPE_PACKAGE);
 713	if (ACPI_FAILURE(status)) {
 714		ret = -ENODEV;
 715		goto out_buf_free;
 716	}
 717
 718	out_obj = (union acpi_object *) output.pointer;
 719
 720	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
 721	if (!cpc_ptr) {
 722		ret = -ENOMEM;
 723		goto out_buf_free;
 724	}
 725
 726	/* First entry is NumEntries. */
 727	cpc_obj = &out_obj->package.elements[0];
 728	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 729		num_ent = cpc_obj->integer.value;
 730	} else {
 731		pr_debug("Unexpected entry type(%d) for NumEntries\n",
 732				cpc_obj->type);
 733		goto out_free;
 734	}
 735	cpc_ptr->num_entries = num_ent;
 736
 737	/* Second entry should be revision. */
 738	cpc_obj = &out_obj->package.elements[1];
 739	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 740		cpc_rev = cpc_obj->integer.value;
 741	} else {
 742		pr_debug("Unexpected entry type(%d) for Revision\n",
 743				cpc_obj->type);
 744		goto out_free;
 745	}
 746	cpc_ptr->version = cpc_rev;
 747
 748	if (!is_cppc_supported(cpc_rev, num_ent))
 749		goto out_free;
 750
 751	/* Iterate through remaining entries in _CPC */
 752	for (i = 2; i < num_ent; i++) {
 753		cpc_obj = &out_obj->package.elements[i];
 754
 755		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 756			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
 757			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
 758		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
 759			gas_t = (struct cpc_reg *)
 760				cpc_obj->buffer.pointer;
 761
 762			/*
 763			 * The PCC Subspace index is encoded inside
 764			 * the CPC table entries. The same PCC index
 765			 * will be used for all the PCC entries,
 766			 * so extract it only once.
 767			 */
 768			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 769				if (pcc_subspace_id < 0) {
 770					pcc_subspace_id = gas_t->access_width;
 771					if (pcc_data_alloc(pcc_subspace_id))
 772						goto out_free;
 773				} else if (pcc_subspace_id != gas_t->access_width) {
 774					pr_debug("Mismatched PCC ids.\n");
 775					goto out_free;
 776				}
 777			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 778				if (gas_t->address) {
 779					void __iomem *addr;
 780
 781					addr = ioremap(gas_t->address, gas_t->bit_width/8);
 782					if (!addr)
 783						goto out_free;
 784					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
 785				}
 786			} else {
 787				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  788					/* Support only PCC, SYS MEM and FFH type regs */
 789					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
 790					goto out_free;
 791				}
 792			}
 793
 794			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
 795			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
 796		} else {
  797			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
 798			goto out_free;
 799		}
 800	}
 801	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 802
 803	/*
 804	 * Initialize the remaining cpc_regs as unsupported.
 805	 * Example: In case FW exposes CPPC v2, the below loop will initialize
 806	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
 807	 */
 808	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
 809		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
 810		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
 811	}
 812
 813
 814	/* Store CPU Logical ID */
 815	cpc_ptr->cpu_id = pr->id;
 816
 817	/* Parse PSD data for this CPU */
 818	ret = acpi_get_psd(cpc_ptr, handle);
 819	if (ret)
 820		goto out_free;
 821
  822	/* Register the PCC channel once per PCC subspace ID. */
 823	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 824		ret = register_pcc_channel(pcc_subspace_id);
 825		if (ret)
 826			goto out_free;
 827
 828		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
 829		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 830	}
 831
 832	/* Everything looks okay */
 833	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 834
 835	/* Add per logical CPU nodes for reading its feedback counters. */
 836	cpu_dev = get_cpu_device(pr->id);
 837	if (!cpu_dev) {
 838		ret = -EINVAL;
 839		goto out_free;
 840	}
 841
 842	/* Plug PSD data into this CPU's CPC descriptor. */
 843	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 844
 845	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 846			"acpi_cppc");
 847	if (ret) {
 848		per_cpu(cpc_desc_ptr, pr->id) = NULL;
 849		kobject_put(&cpc_ptr->kobj);
 850		goto out_free;
 851	}
 852
 853	kfree(output.pointer);
 854	return 0;
 855
 856out_free:
 857	/* Free all the mapped sys mem areas for this CPU */
 858	for (i = 2; i < cpc_ptr->num_entries; i++) {
 859		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 860
 861		if (addr)
 862			iounmap(addr);
 863	}
 864	kfree(cpc_ptr);
 865
 866out_buf_free:
 867	kfree(output.pointer);
 868	return ret;
 869}
 870EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 871
 872/**
 873 * acpi_cppc_processor_exit - Cleanup CPC structs.
 874 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 875 *
 876 * Return: Void
 877 */
 878void acpi_cppc_processor_exit(struct acpi_processor *pr)
 879{
 880	struct cpc_desc *cpc_ptr;
 881	unsigned int i;
 882	void __iomem *addr;
 883	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
 884
  885	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
 886		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
 887			pcc_data[pcc_ss_id]->refcount--;
 888			if (!pcc_data[pcc_ss_id]->refcount) {
 889				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
 890				kfree(pcc_data[pcc_ss_id]);
 891				pcc_data[pcc_ss_id] = NULL;
 892			}
 893		}
 894	}
 895
 896	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 897	if (!cpc_ptr)
 898		return;
 899
 900	/* Free all the mapped sys mem areas for this CPU */
 901	for (i = 2; i < cpc_ptr->num_entries; i++) {
 902		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 903		if (addr)
 904			iounmap(addr);
 905	}
 906
 907	kobject_put(&cpc_ptr->kobj);
 908	kfree(cpc_ptr);
 909}
 910EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 911
 912/**
 913 * cpc_read_ffh() - Read FFH register
 914 * @cpunum:	CPU number to read
 915 * @reg:	cppc register information
 916 * @val:	place holder for return value
 917 *
 918 * Read bit_width bits from a specified address and bit_offset
 919 *
 920 * Return: 0 for success and error code
 921 */
 922int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 923{
 924	return -ENOTSUPP;
 925}
 926
 927/**
 928 * cpc_write_ffh() - Write FFH register
 929 * @cpunum:	CPU number to write
 930 * @reg:	cppc register information
 931 * @val:	value to write
 932 *
 933 * Write value of bit_width bits to a specified address and bit_offset
 934 *
 935 * Return: 0 for success and error code
 936 */
 937int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 938{
 939	return -ENOTSUPP;
 940}
 941
 942/*
 943 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 944 * as fast as possible. We have already mapped the PCC subspace during init, so
 945 * we can directly write to it.
 946 */
 947
 948static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 949{
 950	int ret_val = 0;
 951	void __iomem *vaddr = 0;
 952	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 953	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 954
 955	if (reg_res->type == ACPI_TYPE_INTEGER) {
 956		*val = reg_res->cpc_entry.int_value;
 957		return ret_val;
 958	}
 959
 960	*val = 0;
 961	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
 962		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 963	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 964		vaddr = reg_res->sys_mem_vaddr;
 965	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
 966		return cpc_read_ffh(cpu, reg, val);
 967	else
 968		return acpi_os_read_memory((acpi_physical_address)reg->address,
 969				val, reg->bit_width);
 970
 971	switch (reg->bit_width) {
 972		case 8:
 973			*val = readb_relaxed(vaddr);
 974			break;
 975		case 16:
 976			*val = readw_relaxed(vaddr);
 977			break;
 978		case 32:
 979			*val = readl_relaxed(vaddr);
 980			break;
 981		case 64:
 982			*val = readq_relaxed(vaddr);
 983			break;
 984		default:
 985			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
 986				 reg->bit_width, pcc_ss_id);
 987			ret_val = -EFAULT;
 988	}
 989
 990	return ret_val;
 991}
 992
 993static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 994{
 995	int ret_val = 0;
 996	void __iomem *vaddr = 0;
 997	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 998	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 999
1000	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1001		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1002	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1003		vaddr = reg_res->sys_mem_vaddr;
1004	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1005		return cpc_write_ffh(cpu, reg, val);
1006	else
1007		return acpi_os_write_memory((acpi_physical_address)reg->address,
1008				val, reg->bit_width);
1009
1010	switch (reg->bit_width) {
1011		case 8:
1012			writeb_relaxed(val, vaddr);
1013			break;
1014		case 16:
1015			writew_relaxed(val, vaddr);
1016			break;
1017		case 32:
1018			writel_relaxed(val, vaddr);
1019			break;
1020		case 64:
1021			writeq_relaxed(val, vaddr);
1022			break;
1023		default:
1024			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1025				 reg->bit_width, pcc_ss_id);
1026			ret_val = -EFAULT;
1027			break;
1028	}
1029
1030	return ret_val;
1031}
1032
1033/**
1034 * cppc_get_desired_perf - Get the value of desired performance register.
1035 * @cpunum: CPU from which to get desired performance.
1036 * @desired_perf: address of a variable to store the returned desired performance
1037 *
1038 * Return: 0 for success, -EIO otherwise.
1039 */
1040int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1041{
1042	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1043	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1044	struct cpc_register_resource *desired_reg;
1045	struct cppc_pcc_data *pcc_ss_data = NULL;
1046
1047	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1048
1049	if (CPC_IN_PCC(desired_reg)) {
1050		int ret = 0;
1051
1052		if (pcc_ss_id < 0)
1053			return -EIO;
1054
1055		pcc_ss_data = pcc_data[pcc_ss_id];
1056
1057		down_write(&pcc_ss_data->pcc_lock);
1058
1059		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1060			cpc_read(cpunum, desired_reg, desired_perf);
1061		else
1062			ret = -EIO;
1063
1064		up_write(&pcc_ss_data->pcc_lock);
1065
1066		return ret;
1067	}
1068
1069	cpc_read(cpunum, desired_reg, desired_perf);
1070
1071	return 0;
1072}
1073EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1074
1075/**
1076 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1077 * @cpunum: CPU from which to get capabilities info.
1078 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1079 *
1080 * Return: 0 for success with perf_caps populated else -ERRNO.
1081 */
1082int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1083{
1084	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1085	struct cpc_register_resource *highest_reg, *lowest_reg,
1086		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1087		*low_freq_reg = NULL, *nom_freq_reg = NULL;
1088	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1089	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1090	struct cppc_pcc_data *pcc_ss_data = NULL;
1091	int ret = 0, regs_in_pcc = 0;
1092
1093	if (!cpc_desc) {
1094		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1095		return -ENODEV;
1096	}
1097
1098	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1099	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1100	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1101	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1102	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1103	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1104	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1105
1106	/* Are any of the regs PCC? */
1107	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1108		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1109		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1110		if (pcc_ss_id < 0) {
1111			pr_debug("Invalid pcc_ss_id\n");
1112			return -ENODEV;
1113		}
1114		pcc_ss_data = pcc_data[pcc_ss_id];
1115		regs_in_pcc = 1;
1116		down_write(&pcc_ss_data->pcc_lock);
1117		/* Ring doorbell once to update PCC subspace */
1118		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1119			ret = -EIO;
1120			goto out_err;
1121		}
1122	}
1123
1124	cpc_read(cpunum, highest_reg, &high);
1125	perf_caps->highest_perf = high;
1126
1127	cpc_read(cpunum, lowest_reg, &low);
1128	perf_caps->lowest_perf = low;
1129
1130	cpc_read(cpunum, nominal_reg, &nom);
1131	perf_caps->nominal_perf = nom;
1132
1133	if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1134	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1135		perf_caps->guaranteed_perf = 0;
1136	} else {
1137		cpc_read(cpunum, guaranteed_reg, &guaranteed);
1138		perf_caps->guaranteed_perf = guaranteed;
1139	}
1140
1141	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1142	perf_caps->lowest_nonlinear_perf = min_nonlinear;
1143
1144	if (!high || !low || !nom || !min_nonlinear)
1145		ret = -EFAULT;
1146
1147	/* Read optional lowest and nominal frequencies if present */
1148	if (CPC_SUPPORTED(low_freq_reg))
1149		cpc_read(cpunum, low_freq_reg, &low_f);
1150
1151	if (CPC_SUPPORTED(nom_freq_reg))
1152		cpc_read(cpunum, nom_freq_reg, &nom_f);
1153
1154	perf_caps->lowest_freq = low_f;
1155	perf_caps->nominal_freq = nom_f;
1156
1157
1158out_err:
1159	if (regs_in_pcc)
1160		up_write(&pcc_ss_data->pcc_lock);
1161	return ret;
1162}
1163EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1164
1165/**
1166 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1167 * @cpunum: CPU from which to read counters.
1168 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1169 *
1170 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1171 */
1172int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1173{
1174	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1175	struct cpc_register_resource *delivered_reg, *reference_reg,
1176		*ref_perf_reg, *ctr_wrap_reg;
1177	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1178	struct cppc_pcc_data *pcc_ss_data = NULL;
1179	u64 delivered, reference, ref_perf, ctr_wrap_time;
1180	int ret = 0, regs_in_pcc = 0;
1181
1182	if (!cpc_desc) {
1183		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1184		return -ENODEV;
1185	}
1186
1187	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1188	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1189	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1190	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1191
1192	/*
1193	 * If reference perf register is not supported then we should
1194	 * use the nominal perf value
1195	 */
1196	if (!CPC_SUPPORTED(ref_perf_reg))
1197		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1198
1199	/* Are any of the regs PCC? */
1200	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1201		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1202		if (pcc_ss_id < 0) {
1203			pr_debug("Invalid pcc_ss_id\n");
1204			return -ENODEV;
1205		}
1206		pcc_ss_data = pcc_data[pcc_ss_id];
1207		down_write(&pcc_ss_data->pcc_lock);
1208		regs_in_pcc = 1;
1209		/* Ring doorbell once to update PCC subspace */
1210		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1211			ret = -EIO;
1212			goto out_err;
1213		}
1214	}
1215
1216	cpc_read(cpunum, delivered_reg, &delivered);
1217	cpc_read(cpunum, reference_reg, &reference);
1218	cpc_read(cpunum, ref_perf_reg, &ref_perf);
1219
1220	/*
1221	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
1222	 * performance counters are assumed to never wrap during the lifetime of
1223	 * platform
1224	 */
1225	ctr_wrap_time = (u64)(~((u64)0));
1226	if (CPC_SUPPORTED(ctr_wrap_reg))
1227		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1228
1229	if (!delivered || !reference || !ref_perf) {
1230		ret = -EFAULT;
1231		goto out_err;
1232	}
1233
1234	perf_fb_ctrs->delivered = delivered;
1235	perf_fb_ctrs->reference = reference;
1236	perf_fb_ctrs->reference_perf = ref_perf;
1237	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1238out_err:
1239	if (regs_in_pcc)
1240		up_write(&pcc_ss_data->pcc_lock);
1241	return ret;
1242}
1243EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
1244
1245/**
1246 * cppc_set_perf - Set a CPU's performance controls.
1247 * @cpu: CPU for which to set performance controls.
1248 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1249 *
1250 * Return: 0 for success, -ERRNO otherwise.
1251 */
1252int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1253{
1254	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1255	struct cpc_register_resource *desired_reg;
1256	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1257	struct cppc_pcc_data *pcc_ss_data = NULL;
1258	int ret = 0;
1259
1260	if (!cpc_desc) {
1261		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1262		return -ENODEV;
1263	}
1264
1265	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1266
1267	/*
1268	 * This is Phase-I where we want to write to CPC registers
1269	 * -> We want all CPUs to be able to execute this phase in parallel
1270	 *
1271	 * Since read_lock can be acquired by multiple CPUs simultaneously we
1272	 * achieve that goal here
1273	 */
1274	if (CPC_IN_PCC(desired_reg)) {
1275		if (pcc_ss_id < 0) {
1276			pr_debug("Invalid pcc_ss_id\n");
1277			return -ENODEV;
1278		}
1279		pcc_ss_data = pcc_data[pcc_ss_id];
1280		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1281		if (pcc_ss_data->platform_owns_pcc) {
1282			ret = check_pcc_chan(pcc_ss_id, false);
1283			if (ret) {
1284				up_read(&pcc_ss_data->pcc_lock);
1285				return ret;
1286			}
1287		}
1288		/*
1289		 * Update the pending_write to make sure a PCC CMD_READ will not
1290		 * arrive and steal the channel during the switch to write lock
1291		 */
1292		pcc_ss_data->pending_pcc_write_cmd = true;
1293		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1294		cpc_desc->write_cmd_status = 0;
1295	}
1296
1297	/*
1298	 * Skip writing MIN/MAX until Linux knows how to come up with
1299	 * useful values.
1300	 */
1301	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1302
1303	if (CPC_IN_PCC(desired_reg))
1304		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
1305	/*
1306	 * This is Phase-II where we transfer the ownership of PCC to Platform
1307	 *
1308	 * Short Summary: Basically if we think of a group of cppc_set_perf
1309	 * requests that happened in short overlapping interval. The last CPU to
1310	 * come out of Phase-I will enter Phase-II and ring the doorbell.
1311	 *
1312	 * We have the following requirements for Phase-II:
1313	 *     1. We want to execute Phase-II only when there are no CPUs
1314	 * currently executing in Phase-I
1315	 *     2. Once we start Phase-II we want to avoid all other CPUs from
1316	 * entering Phase-I.
1317	 *     3. We want only one CPU among all those who went through Phase-I
1318	 * to run phase-II
1319	 *
1320	 * If write_trylock fails to get the lock and doesn't transfer the
1321	 * PCC ownership to the platform, then one of the following will be TRUE
1322	 *     1. There is at-least one CPU in Phase-I which will later execute
1323	 * write_trylock, so the CPUs in Phase-I will be responsible for
1324	 * executing the Phase-II.
1325	 *     2. Some other CPU has beaten this CPU to successfully execute the
1326	 * write_trylock and has already acquired the write_lock. We know for a
1327	 * fact it (other CPU acquiring the write_lock) couldn't have happened
1328	 * before this CPU's Phase-I as we held the read_lock.
1329	 *     3. Some other CPU executing pcc CMD_READ has stolen the
1330	 * down_write, in which case, send_pcc_cmd will check for pending
1331	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
1332	 * So this CPU can be certain that its request will be delivered
1333	 *    So in all cases, this CPU knows that its request will be delivered
1334	 * by another CPU and can return
1335	 *
1336	 * After getting the down_write we still need to check for
1337	 * pending_pcc_write_cmd to take care of the following scenario
1338	 *    The thread running this code could be scheduled out between
1339	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1340	 * could have delivered the request to Platform by triggering the
1341	 * doorbell and transferred the ownership of PCC to platform. So this
1342	 * avoids triggering an unnecessary doorbell and more importantly before
1343	 * triggering the doorbell it makes sure that the PCC channel ownership
1344	 * is still with OSPM.
1345	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1346	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
1347	 * before the pcc CMD_WRITE is completed. pcc_send_cmd checks for this
1348	 * case during a CMD_READ and if there are pending writes it delivers
1349	 * the write command before servicing the read command
1350	 */
1351	if (CPC_IN_PCC(desired_reg)) {
1352		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1353			/* Update only if there are pending write commands */
1354			if (pcc_ss_data->pending_pcc_write_cmd)
1355				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1356			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
1357		} else
1358			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1359			wait_event(pcc_ss_data->pcc_write_wait_q,
1360				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1361
1362		/* send_pcc_cmd updates the status in case of failure */
1363		ret = cpc_desc->write_cmd_status;
1364	}
1365	return ret;
1366}
1367EXPORT_SYMBOL_GPL(cppc_set_perf);
1368
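/*
 * Example timeline for the two-phase batching above (editor's sketch):
 * CPU0 and CPU1 both enter Phase-I and write desired_perf under the
 * read_lock. CPU0 wins down_write_trylock() and rings the doorbell once
 * for both requests; CPU1, finding the lock taken, sleeps on
 * pcc_write_wait_q until send_pcc_cmd() bumps pcc_write_cnt, then reads
 * its result from write_cmd_status.
 */
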
1369/**
1370 * cppc_get_transition_latency - returns frequency transition latency in ns
1371 *
1372 * ACPI CPPC does not explicitly specify how a platform can specify the
1373 * transition latency for performance change requests. The closest we have
1374 * is the timing information from the PCCT tables which provides the info
1375 * on the number and frequency of PCC commands the platform can handle.
1376 */
1377unsigned int cppc_get_transition_latency(int cpu_num)
1378{
1379	/*
1380	 * Expected transition latency is based on the PCCT timing values
1381	 * Below are the definitions from the ACPI spec:
1382	 * pcc_nominal- Expected latency to process a command, in microseconds
1383	 * pcc_mpar   - The maximum number of periodic requests that the subspace
1384	 *              channel can support, reported in commands per minute. 0
1385	 *              indicates no limitation.
1386	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1387	 *              completion of a command before issuing the next command,
1388	 *              in microseconds.
1389	 */
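	/*
	 * Worked example (editor's note, hypothetical values): with
	 * pcc_mpar = 600 commands/min the MPAR bound is
	 * 60 * (10^9 / 600) ~= 100 ms; pcc_nominal = 1000 us and
	 * pcc_mrtt = 500 us contribute 1 ms and 0.5 ms, so the maximum,
	 * ~100 ms (in ns), is returned.
	 */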
1390	unsigned int latency_ns = 0;
1391	struct cpc_desc *cpc_desc;
1392	struct cpc_register_resource *desired_reg;
1393	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1394	struct cppc_pcc_data *pcc_ss_data;
1395
1396	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1397	if (!cpc_desc)
1398		return CPUFREQ_ETERNAL;
1399
1400	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1401	if (!CPC_IN_PCC(desired_reg))
1402		return CPUFREQ_ETERNAL;
1403
1404	if (pcc_ss_id < 0)
1405		return CPUFREQ_ETERNAL;
1406
1407	pcc_ss_data = pcc_data[pcc_ss_id];
1408	if (pcc_ss_data->pcc_mpar)
1409		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1410
1411	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1412	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1413
1414	return latency_ns;
1415}
1416EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
v4.17
 
   1/*
   2 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
   3 *
   4 * (C) Copyright 2014, 2015 Linaro Ltd.
   5 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License
   9 * as published by the Free Software Foundation; version 2
  10 * of the License.
  11 *
  12 * CPPC describes a few methods for controlling CPU performance using
  13 * information from a per CPU table called CPC. This table is described in
  14 * the ACPI v5.0+ specification. The table consists of a list of
  15 * registers which may be memory mapped or hardware registers and also may
  16 * include some static integer values.
  17 *
  18 * CPU performance is on an abstract continuous scale as against a discretized
  19 * P-state scale which is tied to CPU frequency only. In brief, the basic
  20 * operation involves:
  21 *
  22 * - OS makes a CPU performance request. (Can provide min and max bounds)
  23 *
  24 * - Platform (such as BMC) is free to optimize request within requested bounds
  25 *   depending on power/thermal budgets etc.
  26 *
  27 * - Platform conveys its decision back to OS
  28 *
  29 * The communication between OS and platform occurs through another medium
  30 * called (PCC) Platform Communication Channel. This is a generic mailbox like
  31 * mechanism which includes doorbell semantics to indicate register updates.
  32 * See drivers/mailbox/pcc.c for details on PCC.
  33 *
  34 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  35 * above specifications.
  36 */
  37
  38#define pr_fmt(fmt)	"ACPI CPPC: " fmt
  39
  40#include <linux/cpufreq.h>
  41#include <linux/delay.h>
 
  42#include <linux/ktime.h>
  43#include <linux/rwsem.h>
  44#include <linux/wait.h>
  45
  46#include <acpi/cppc_acpi.h>
  47
  48struct cppc_pcc_data {
  49	struct mbox_chan *pcc_channel;
  50	void __iomem *pcc_comm_addr;
  51	bool pcc_channel_acquired;
  52	ktime_t deadline;
  53	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  54
  55	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
  56	bool platform_owns_pcc;		/* Ownership of PCC subspace */
  57	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */
  58
  59	/*
  60	 * Lock to provide controlled access to the PCC channel.
  61	 *
  62	 * For performance critical usecases(currently cppc_set_perf)
  63	 *	We need to take read_lock and check if channel belongs to OSPM
  64	 * before reading or writing to PCC subspace
  65	 *	We need to take write_lock before transferring the channel
  66	 * ownership to the platform via a Doorbell
  67	 *	This allows us to batch a number of CPPC requests if they happen
  68	 * to originate in about the same time
  69	 *
  70	 * For non-performance critical usecases(init)
  71	 *	Take write_lock for all purposes which gives exclusive access
  72	 */
  73	struct rw_semaphore pcc_lock;
  74
  75	/* Wait queue for CPUs whose requests were batched */
  76	wait_queue_head_t pcc_write_wait_q;
  77	ktime_t last_cmd_cmpl_time;
  78	ktime_t last_mpar_reset;
  79	int mpar_count;
  80	int refcount;
  81};
  82
  83/* Array  to represent the PCC channel per subspace id */
  84static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  85/* The cpu_pcc_subspace_idx containsper CPU subspace id */
  86static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  87
  88/*
  89 * The cpc_desc structure contains the ACPI register details
  90 * as described in the per CPU _CPC tables. The details
  91 * include the type of register (e.g. PCC, System IO, FFH etc.)
  92 * and destination addresses which lets us READ/WRITE CPU performance
  93 * information using the appropriate I/O methods.
  94 */
  95static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  96
  97/* pcc mapped address + header size + offset within PCC subspace */
  98#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  99						0x8 + (offs))
 100
 101/* Check if a CPC register is in PCC */
 102#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
 103				(cpc)->cpc_entry.reg.space_id ==	\
 104				ACPI_ADR_SPACE_PLATFORM_COMM)
 105
 106/* Evalutes to True if reg is a NULL register descriptor */
 107#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
 108				(reg)->address == 0 &&			\
 109				(reg)->bit_width == 0 &&		\
 110				(reg)->bit_offset == 0 &&		\
 111				(reg)->access_width == 0)
 112
 113/* Evalutes to True if an optional cpc field is supported */
 114#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
 115				!!(cpc)->cpc_entry.int_value :		\
 116				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
 117/*
 118 * Arbitrary Retries in case the remote processor is slow to respond
 119 * to PCC commands. Keeping it high enough to cover emulators where
 120 * the processors run painfully slow.
 121 */
 122#define NUM_RETRIES 500ULL
 123
 124struct cppc_attr {
 125	struct attribute attr;
 126	ssize_t (*show)(struct kobject *kobj,
 127			struct attribute *attr, char *buf);
 128	ssize_t (*store)(struct kobject *kobj,
 129			struct attribute *attr, const char *c, ssize_t count);
 130};
 131
 132#define define_one_cppc_ro(_name)		\
 133static struct cppc_attr _name =			\
 134__ATTR(_name, 0444, show_##_name, NULL)
 135
 136#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
 137
 138#define show_cppc_data(access_fn, struct_name, member_name)		\
 139	static ssize_t show_##member_name(struct kobject *kobj,		\
 140					struct attribute *attr,	char *buf) \
 141	{								\
 142		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
 143		struct struct_name st_name = {0};			\
 144		int ret;						\
 145									\
 146		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
 147		if (ret)						\
 148			return ret;					\
 149									\
 150		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
 151				(u64)st_name.member_name);		\
 152	}								\
 153	define_one_cppc_ro(member_name)
 154
 155show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 156show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 158show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
 
 
 
 159show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 160show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 161
 162static ssize_t show_feedback_ctrs(struct kobject *kobj,
 163		struct attribute *attr, char *buf)
 164{
 165	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 166	struct cppc_perf_fb_ctrs fb_ctrs = {0};
 167	int ret;
 168
 169	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
 170	if (ret)
 171		return ret;
 172
 173	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
 174			fb_ctrs.reference, fb_ctrs.delivered);
 175}
 176define_one_cppc_ro(feedback_ctrs);
 177
 178static struct attribute *cppc_attrs[] = {
 179	&feedback_ctrs.attr,
 180	&reference_perf.attr,
 181	&wraparound_time.attr,
 182	&highest_perf.attr,
 183	&lowest_perf.attr,
 184	&lowest_nonlinear_perf.attr,
 185	&nominal_perf.attr,
 
 
 186	NULL
 187};
 188
 189static struct kobj_type cppc_ktype = {
 190	.sysfs_ops = &kobj_sysfs_ops,
 191	.default_attrs = cppc_attrs,
 192};
 193
 194static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 195{
 196	int ret = -EIO, status = 0;
 197	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 198	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 199		pcc_ss_data->pcc_comm_addr;
 200	ktime_t next_deadline = ktime_add(ktime_get(),
 201					  pcc_ss_data->deadline);
 202
 203	if (!pcc_ss_data->platform_owns_pcc)
 204		return 0;
 205
 206	/* Retry in case the remote processor was too slow to catch up. */
 207	while (!ktime_after(ktime_get(), next_deadline)) {
 208		/*
 209		 * Per spec, prior to boot the PCC space wil be initialized by
 210		 * platform and should have set the command completion bit when
 211		 * PCC can be used by OSPM
 212		 */
 213		status = readw_relaxed(&generic_comm_base->status);
 214		if (status & PCC_CMD_COMPLETE_MASK) {
 215			ret = 0;
 216			if (chk_err_bit && (status & PCC_ERROR_MASK))
 217				ret = -EIO;
 218			break;
 219		}
 220		/*
 221		 * Reducing the bus traffic in case this loop takes longer than
 222		 * a few retries.
 223		 */
 224		udelay(3);
 225	}
 226
 227	if (likely(!ret))
 228		pcc_ss_data->platform_owns_pcc = false;
 229	else
 230		pr_err("PCC check channel failed for ss: %d. Status=%x\n",
 231		       pcc_ss_id, status);
 232
 233	return ret;
 234}
 235
 236/*
 237 * This function transfers the ownership of the PCC to the platform
 238 * So it must be called while holding write_lock(pcc_lock)
 239 */
 240static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 241{
 242	int ret = -EIO, i;
 243	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 244	struct acpi_pcct_shared_memory *generic_comm_base =
 245		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
 246	unsigned int time_delta;
 247
 248	/*
 249	 * For CMD_WRITE we know for a fact the caller should have checked
 250	 * the channel before writing to PCC space
 251	 */
 252	if (cmd == CMD_READ) {
 253		/*
 254		 * If there are pending cpc_writes, then we stole the channel
 255		 * before write completion, so first send a WRITE command to
 256		 * platform
 257		 */
 258		if (pcc_ss_data->pending_pcc_write_cmd)
 259			send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 260
 261		ret = check_pcc_chan(pcc_ss_id, false);
 262		if (ret)
 263			goto end;
 264	} else /* CMD_WRITE */
  265		pcc_ss_data->pending_pcc_write_cmd = false;
 266
 267	/*
 268	 * Handle the Minimum Request Turnaround Time(MRTT)
 269	 * "The minimum amount of time that OSPM must wait after the completion
 270	 * of a command before issuing the next command, in microseconds"
 271	 */
 272	if (pcc_ss_data->pcc_mrtt) {
 273		time_delta = ktime_us_delta(ktime_get(),
 274					    pcc_ss_data->last_cmd_cmpl_time);
 275		if (pcc_ss_data->pcc_mrtt > time_delta)
 276			udelay(pcc_ss_data->pcc_mrtt - time_delta);
 277	}
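	/*
	 * Worked example with illustrative values: if pcc_mrtt is 60us
	 * and only 20us have elapsed since the last command completed,
	 * the udelay() above waits out the remaining 40us before the
	 * next command is issued.
	 */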
 278
 279	/*
 280	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
 281	 * "The maximum number of periodic requests that the subspace channel can
 282	 * support, reported in commands per minute. 0 indicates no limitation."
 283	 *
  284	 * Ideally, this parameter should be zero or large enough to handle the
  285	 * maximum number of requests that all the cores in the system can
  286	 * collectively generate. If it is not, we will follow the spec and
  287	 * simply not send the request to the platform after hitting the MPAR
  288	 * limit in any 60s window.
 289	 */
 290	if (pcc_ss_data->pcc_mpar) {
 291		if (pcc_ss_data->mpar_count == 0) {
 292			time_delta = ktime_ms_delta(ktime_get(),
 293						    pcc_ss_data->last_mpar_reset);
 294			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 295				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
 296					 pcc_ss_id);
 297				ret = -EIO;
 298				goto end;
 299			}
 300			pcc_ss_data->last_mpar_reset = ktime_get();
 301			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 302		}
 303		pcc_ss_data->mpar_count--;
 304	}
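	/*
	 * Worked example with illustrative values: with pcc_mpar = 600,
	 * commands 1..600 in a 60s window consume mpar_count; the 601st
	 * command within the same window is rejected with -EIO above, and
	 * mpar_count is only replenished once the window has expired.
	 */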
 305
 306	/* Write to the shared comm region. */
 307	writew_relaxed(cmd, &generic_comm_base->command);
 308
 309	/* Flip CMD COMPLETE bit */
 310	writew_relaxed(0, &generic_comm_base->status);
 311
 312	pcc_ss_data->platform_owns_pcc = true;
 313
 314	/* Ring doorbell */
 315	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
 316	if (ret < 0) {
 317		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
 318		       pcc_ss_id, cmd, ret);
 319		goto end;
 320	}
 321
  322	/* Wait for completion and check for the PCC error bit. */
 323	ret = check_pcc_chan(pcc_ss_id, true);
 324
 325	if (pcc_ss_data->pcc_mrtt)
 326		pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 327
 328	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
 329		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
 330	else
 331		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 332
 333end:
 334	if (cmd == CMD_WRITE) {
 335		if (unlikely(ret)) {
 336			for_each_possible_cpu(i) {
 337				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
 338				if (!desc)
 339					continue;
 340
 341				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 342					desc->write_cmd_status = ret;
 343			}
 344		}
 345		pcc_ss_data->pcc_write_cnt++;
 346		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 347	}
 348
 349	return ret;
 350}
 351
 352static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 353{
 354	if (ret < 0)
 355		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 356				*(u16 *)msg, ret);
 357	else
 358		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
 359				*(u16 *)msg, ret);
 360}
 361
  362static struct mbox_client cppc_mbox_cl = {
 363	.tx_done = cppc_chan_tx_done,
 364	.knows_txdone = true,
 365};
 366
 367static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 368{
 369	int result = -EFAULT;
 370	acpi_status status = AE_OK;
 371	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 372	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
 373	struct acpi_buffer state = {0, NULL};
 374	union acpi_object  *psd = NULL;
 375	struct acpi_psd_package *pdomain;
 376
 377	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
 378			ACPI_TYPE_PACKAGE);
 379	if (ACPI_FAILURE(status))
 380		return -ENODEV;
 381
 382	psd = buffer.pointer;
 383	if (!psd || psd->package.count != 1) {
 384		pr_debug("Invalid _PSD data\n");
 385		goto end;
 386	}
 387
 388	pdomain = &(cpc_ptr->domain_info);
 389
 390	state.length = sizeof(struct acpi_psd_package);
 391	state.pointer = pdomain;
 392
 393	status = acpi_extract_package(&(psd->package.elements[0]),
 394		&format, &state);
 395	if (ACPI_FAILURE(status)) {
 396		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
 397		goto end;
 398	}
 399
 400	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
 401		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
 402		goto end;
 403	}
 404
 405	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
 406		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
 407		goto end;
 408	}
 409
 410	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 411	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 412	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 413		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
 414		goto end;
 415	}
 416
 417	result = 0;
 418end:
 419	kfree(buffer.pointer);
 420	return result;
 421}
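/*
 * For reference, a _PSD object satisfying the checks above looks roughly
 * like this in ASL (illustrative values; 0xFD is SW_ANY coordination):
 *
 *	Name (_PSD, Package()
 *	{
 *		// NumEntries, Revision, Domain, CoordType, NumProcessors
 *		Package() { 5, 0, 0, 0xFD, 4 }
 *	})
 */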
 422
 423/**
 424 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 425 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 426 *
 427 *	Return: 0 for success or negative value for err.
 428 */
 429int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 430{
 431	int count_target;
 432	int retval = 0;
 433	unsigned int i, j;
 434	cpumask_var_t covered_cpus;
 435	struct cppc_cpudata *pr, *match_pr;
 436	struct acpi_psd_package *pdomain;
 437	struct acpi_psd_package *match_pdomain;
 438	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
 439
 440	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 441		return -ENOMEM;
 442
 443	/*
  444	 * Now that we have _PSD data from all CPUs, let's set up P-state
 445	 * domain info.
 446	 */
 447	for_each_possible_cpu(i) {
 448		pr = all_cpu_data[i];
 449		if (!pr)
 450			continue;
 451
 452		if (cpumask_test_cpu(i, covered_cpus))
 453			continue;
 454
 455		cpc_ptr = per_cpu(cpc_desc_ptr, i);
 456		if (!cpc_ptr) {
 457			retval = -EFAULT;
 458			goto err_ret;
 459		}
 460
 461		pdomain = &(cpc_ptr->domain_info);
 462		cpumask_set_cpu(i, pr->shared_cpu_map);
 463		cpumask_set_cpu(i, covered_cpus);
 464		if (pdomain->num_processors <= 1)
 465			continue;
 466
 467		/* Validate the Domain info */
 468		count_target = pdomain->num_processors;
 469		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
 470			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 471		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
 472			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
 473		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
 474			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 475
 476		for_each_possible_cpu(j) {
 477			if (i == j)
 478				continue;
 479
 480			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 481			if (!match_cpc_ptr) {
 482				retval = -EFAULT;
 483				goto err_ret;
 484			}
 485
 486			match_pdomain = &(match_cpc_ptr->domain_info);
 487			if (match_pdomain->domain != pdomain->domain)
 488				continue;
 489
 490			/* Here i and j are in the same domain */
 491			if (match_pdomain->num_processors != count_target) {
 492				retval = -EFAULT;
 493				goto err_ret;
 494			}
 495
 496			if (pdomain->coord_type != match_pdomain->coord_type) {
 497				retval = -EFAULT;
 498				goto err_ret;
 499			}
 500
 501			cpumask_set_cpu(j, covered_cpus);
 502			cpumask_set_cpu(j, pr->shared_cpu_map);
 503		}
 504
 505		for_each_possible_cpu(j) {
 506			if (i == j)
 507				continue;
 508
 509			match_pr = all_cpu_data[j];
 510			if (!match_pr)
 511				continue;
 512
 513			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 514			if (!match_cpc_ptr) {
 515				retval = -EFAULT;
 516				goto err_ret;
 517			}
 518
 519			match_pdomain = &(match_cpc_ptr->domain_info);
 520			if (match_pdomain->domain != pdomain->domain)
 521				continue;
 522
 523			match_pr->shared_type = pr->shared_type;
 524			cpumask_copy(match_pr->shared_cpu_map,
 525				     pr->shared_cpu_map);
 526		}
 527	}
 528
 529err_ret:
 530	for_each_possible_cpu(i) {
 531		pr = all_cpu_data[i];
 532		if (!pr)
 533			continue;
 534
 535		/* Assume no coordination on any error parsing domain info */
 536		if (retval) {
 537			cpumask_clear(pr->shared_cpu_map);
 538			cpumask_set_cpu(i, pr->shared_cpu_map);
 539			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 540		}
 541	}
 542
 543	free_cpumask_var(covered_cpus);
 544	return retval;
 545}
 546EXPORT_SYMBOL_GPL(acpi_get_psd_map);
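/*
 * For example, if CPUs 0-3 all report _PSD domain 0 with SW_ANY
 * coordination and num_processors = 4, each CPU's shared_cpu_map ends up
 * containing CPUs 0-3 and its shared_type is CPUFREQ_SHARED_TYPE_ANY.
 */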
 547
 548static int register_pcc_channel(int pcc_ss_idx)
 549{
 550	struct acpi_pcct_hw_reduced *cppc_ss;
 551	u64 usecs_lat;
 552
 553	if (pcc_ss_idx >= 0) {
 554		pcc_data[pcc_ss_idx]->pcc_channel =
 555			pcc_mbox_request_channel(&cppc_mbox_cl,	pcc_ss_idx);
 556
 557		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
 558			pr_err("Failed to find PCC channel for subspace %d\n",
 559			       pcc_ss_idx);
 560			return -ENODEV;
 561		}
 562
 563		/*
 564		 * The PCC mailbox controller driver should
 565		 * have parsed the PCCT (global table of all
 566		 * PCC channels) and stored pointers to the
 567		 * subspace communication region in con_priv.
 568		 */
 569		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 570
 571		if (!cppc_ss) {
 572			pr_err("No PCC subspace found for %d CPPC\n",
 573			       pcc_ss_idx);
 574			return -ENODEV;
 575		}
 576
 577		/*
 578		 * cppc_ss->latency is just a Nominal value. In reality
 579		 * the remote processor could be much slower to reply.
 580		 * So add an arbitrary amount of wait on top of Nominal.
 581		 */
  582		usecs_lat = NUM_RETRIES * cppc_ss->latency;
  583		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
 584		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
 585		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
 586		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
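		/*
		 * Worked example with illustrative values: a nominal
		 * latency of 100us and NUM_RETRIES of 500 give a polling
		 * deadline of 50ms in check_pcc_chan().
		 */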
 587
 588		pcc_data[pcc_ss_idx]->pcc_comm_addr =
 589			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
 590		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 591			pr_err("Failed to ioremap PCC comm region mem for %d\n",
 592			       pcc_ss_idx);
 593			return -ENOMEM;
 594		}
 595
  596		/* Set flag so that we don't come here for each CPU. */
 597		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 598	}
 599
 600	return 0;
 601}
 602
 603/**
  604 * cpc_ffh_supported() - check if FFH read/write is supported
 605 *
 606 * Check if the architecture has support for functional fixed hardware
 607 * read/write capability.
 608 *
 609 * Return: true for supported, false for not supported
 610 */
 611bool __weak cpc_ffh_supported(void)
 612{
 613	return false;
 614}
 615
 616
 617/**
 618 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 619 *
 620 * Check and allocate the cppc_pcc_data memory.
  621 * In some processor configurations it is possible that the same subspace
  622 * is shared between multiple CPUs. This is seen especially on CPUs
  623 * with hardware multi-threading support.
 624 *
 625 * Return: 0 for success, errno for failure
 626 */
 627int pcc_data_alloc(int pcc_ss_id)
 628{
 629	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
 630		return -EINVAL;
 631
 632	if (pcc_data[pcc_ss_id]) {
 633		pcc_data[pcc_ss_id]->refcount++;
 634	} else {
 635		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
 636					      GFP_KERNEL);
 637		if (!pcc_data[pcc_ss_id])
 638			return -ENOMEM;
 639		pcc_data[pcc_ss_id]->refcount++;
 640	}
 641
 642	return 0;
 643}
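/*
 * For example, two SMT siblings whose _CPC entries point at the same PCC
 * subspace each call pcc_data_alloc() with the same pcc_ss_id during
 * probe: the first call allocates the cppc_pcc_data, each call bumps
 * refcount, and acpi_cppc_processor_exit() frees the channel only when
 * the last user is gone.
 */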
 644/*
 645 * An example CPC table looks like the following.
 646 *
 647 *	Name(_CPC, Package()
 648 *			{
 649 *			17,
  650 *			// NumEntries
 651 *			1,
 652 *			// Revision
 653 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 654 *			// Highest Performance
 655 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 656 *			// Nominal Performance
 657 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 658 *			// Lowest Nonlinear Performance
 659 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 660 *			// Lowest Performance
 661 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 662 *			// Guaranteed Performance Register
 663 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 664 *			// Desired Performance Register
 665 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 666 *			..
 667 *			..
 668 *			..
 669 *
 670 *		}
 671 * Each Register() encodes how to access that specific register.
 672 * e.g. a sample PCC entry has the following encoding:
 673 *
  674 *	Register (
  675 *		PCC,
  676 *		// AddressSpaceKeyword
  677 *		8,
  678 *		// RegisterBitWidth
  679 *		8,
  680 *		// RegisterBitOffset
  681 *		0x30,
  682 *		// RegisterAddress
  683 *		9,
  684 *		// AccessSize (subspace ID)
  685 *	)
 688 */
 689
 690/**
 691 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
  692 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 693 *
 694 *	Return: 0 for success or negative value for err.
 695 */
 696int acpi_cppc_processor_probe(struct acpi_processor *pr)
 697{
 698	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 699	union acpi_object *out_obj, *cpc_obj;
 700	struct cpc_desc *cpc_ptr;
 701	struct cpc_reg *gas_t;
 702	struct device *cpu_dev;
 703	acpi_handle handle = pr->handle;
 704	unsigned int num_ent, i, cpc_rev;
 705	int pcc_subspace_id = -1;
 706	acpi_status status;
 707	int ret = -EFAULT;
 708
 709	/* Parse the ACPI _CPC table for this cpu. */
 710	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 711			ACPI_TYPE_PACKAGE);
 712	if (ACPI_FAILURE(status)) {
 713		ret = -ENODEV;
 714		goto out_buf_free;
 715	}
 716
 717	out_obj = (union acpi_object *) output.pointer;
 718
 719	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
 720	if (!cpc_ptr) {
 721		ret = -ENOMEM;
 722		goto out_buf_free;
 723	}
 724
 725	/* First entry is NumEntries. */
 726	cpc_obj = &out_obj->package.elements[0];
 727	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 728		num_ent = cpc_obj->integer.value;
 729	} else {
 730		pr_debug("Unexpected entry type(%d) for NumEntries\n",
 731				cpc_obj->type);
 732		goto out_free;
 733	}
 734
 735	/* Only support CPPCv2. Bail otherwise. */
 736	if (num_ent != CPPC_NUM_ENT) {
 737		pr_debug("Firmware exports %d entries. Expected: %d\n",
 738				num_ent, CPPC_NUM_ENT);
 739		goto out_free;
 740	}
 741
 742	cpc_ptr->num_entries = num_ent;
 743
 744	/* Second entry should be revision. */
 745	cpc_obj = &out_obj->package.elements[1];
 746	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 747		cpc_rev = cpc_obj->integer.value;
 748	} else {
 749		pr_debug("Unexpected entry type(%d) for Revision\n",
 750				cpc_obj->type);
 751		goto out_free;
 752	}
 753
 754	if (cpc_rev != CPPC_REV) {
 755		pr_debug("Firmware exports revision:%d. Expected:%d\n",
 756				cpc_rev, CPPC_REV);
 757		goto out_free;
 758	}
 759
 760	/* Iterate through remaining entries in _CPC */
 761	for (i = 2; i < num_ent; i++) {
 762		cpc_obj = &out_obj->package.elements[i];
 763
 764		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
 765			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
 766			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
 767		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
 768			gas_t = (struct cpc_reg *)
 769				cpc_obj->buffer.pointer;
 770
 771			/*
 772			 * The PCC Subspace index is encoded inside
 773			 * the CPC table entries. The same PCC index
 774			 * will be used for all the PCC entries,
 775			 * so extract it only once.
 776			 */
 777			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 778				if (pcc_subspace_id < 0) {
 779					pcc_subspace_id = gas_t->access_width;
 780					if (pcc_data_alloc(pcc_subspace_id))
 781						goto out_free;
 782				} else if (pcc_subspace_id != gas_t->access_width) {
 783					pr_debug("Mismatched PCC ids.\n");
 784					goto out_free;
 785				}
 786			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 787				if (gas_t->address) {
 788					void __iomem *addr;
 789
 790					addr = ioremap(gas_t->address, gas_t->bit_width/8);
 791					if (!addr)
 792						goto out_free;
 793					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
 794				}
 795			} else {
 796				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  797				/* Support only PCC, SYS MEM and FFH type regs. */
 798					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
 799					goto out_free;
 800				}
 801			}
 802
 803			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
 804			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
 805		} else {
  806			pr_debug("Error in entry:%d in CPC table of CPU:%d\n", i, pr->id);
 807			goto out_free;
 808		}
 809	}
 810	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 811	/* Store CPU Logical ID */
 812	cpc_ptr->cpu_id = pr->id;
 813
 814	/* Parse PSD data for this CPU */
 815	ret = acpi_get_psd(cpc_ptr, handle);
 816	if (ret)
 817		goto out_free;
 818
 819	/* Register PCC channel once for all PCC subspace id. */
 820	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 821		ret = register_pcc_channel(pcc_subspace_id);
 822		if (ret)
 823			goto out_free;
 824
 825		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
 826		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 827	}
 828
 829	/* Everything looks okay */
 830	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 831
  832	/* Add a per-logical-CPU node for reading its feedback counters. */
 833	cpu_dev = get_cpu_device(pr->id);
 834	if (!cpu_dev) {
 835		ret = -EINVAL;
 836		goto out_free;
 837	}
 838
  839	/* Plug PSD data into this CPU's CPC descriptor. */
 840	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 841
 842	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 843			"acpi_cppc");
 844	if (ret) {
 845		per_cpu(cpc_desc_ptr, pr->id) = NULL;
 846		goto out_free;
 847	}
 848
 849	kfree(output.pointer);
 850	return 0;
 851
 852out_free:
 853	/* Free all the mapped sys mem areas for this CPU */
 854	for (i = 2; i < cpc_ptr->num_entries; i++) {
 855		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 856
 857		if (addr)
 858			iounmap(addr);
 859	}
 860	kfree(cpc_ptr);
 861
 862out_buf_free:
 863	kfree(output.pointer);
 864	return ret;
 865}
 866EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 867
 868/**
 869 * acpi_cppc_processor_exit - Cleanup CPC structs.
  870 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 871 *
 872 * Return: Void
 873 */
 874void acpi_cppc_processor_exit(struct acpi_processor *pr)
 875{
 876	struct cpc_desc *cpc_ptr;
 877	unsigned int i;
 878	void __iomem *addr;
 879	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
 880
  881	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
 882		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
 883			pcc_data[pcc_ss_id]->refcount--;
 884			if (!pcc_data[pcc_ss_id]->refcount) {
  885				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
  886				kfree(pcc_data[pcc_ss_id]);
  887				pcc_data[pcc_ss_id] = NULL;
 888			}
 889		}
 890	}
 891
 892	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 893	if (!cpc_ptr)
 894		return;
 895
 896	/* Free all the mapped sys mem areas for this CPU */
 897	for (i = 2; i < cpc_ptr->num_entries; i++) {
 898		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 899		if (addr)
 900			iounmap(addr);
 901	}
 902
 903	kobject_put(&cpc_ptr->kobj);
 904	kfree(cpc_ptr);
 905}
 906EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 907
 908/**
 909 * cpc_read_ffh() - Read FFH register
 910 * @cpunum:	cpu number to read
 911 * @reg:	cppc register information
 912 * @val:	place holder for return value
 913 *
 914 * Read bit_width bits from a specified address and bit_offset
 915 *
  916 * Return: 0 on success or an error code on failure.
 917 */
 918int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 919{
 920	return -ENOTSUPP;
 921}
 922
 923/**
 924 * cpc_write_ffh() - Write FFH register
 925 * @cpunum:	cpu number to write
 926 * @reg:	cppc register information
 927 * @val:	value to write
 928 *
 929 * Write value of bit_width bits to a specified address and bit_offset
 930 *
  931 * Return: 0 on success or an error code on failure.
 932 */
 933int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 934{
 935	return -ENOTSUPP;
 936}
 937
 938/*
  939 * Since cpc_read and cpc_write are called while holding pcc_lock, they
  940 * should be as fast as possible. We have already mapped the PCC subspace
  941 * during init, so we can access it directly.
 942 */
 943
 944static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 945{
 946	int ret_val = 0;
  947	void __iomem *vaddr = NULL;
 948	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 949	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 950
 951	if (reg_res->type == ACPI_TYPE_INTEGER) {
 952		*val = reg_res->cpc_entry.int_value;
 953		return ret_val;
 954	}
 955
 956	*val = 0;
 957	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
 958		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 959	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 960		vaddr = reg_res->sys_mem_vaddr;
 961	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
 962		return cpc_read_ffh(cpu, reg, val);
 963	else
 964		return acpi_os_read_memory((acpi_physical_address)reg->address,
 965				val, reg->bit_width);
 966
  967	switch (reg->bit_width) {
  968	case 8:
  969		*val = readb_relaxed(vaddr);
  970		break;
  971	case 16:
  972		*val = readw_relaxed(vaddr);
  973		break;
  974	case 32:
  975		*val = readl_relaxed(vaddr);
  976		break;
  977	case 64:
  978		*val = readq_relaxed(vaddr);
  979		break;
  980	default:
  981		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
  982			 reg->bit_width, pcc_ss_id);
  983		ret_val = -EFAULT;
  984	}
 985
 986	return ret_val;
 987}
 988
 989static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 990{
 991	int ret_val = 0;
  992	void __iomem *vaddr = NULL;
 993	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 994	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 995
 996	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
 997		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 998	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 999		vaddr = reg_res->sys_mem_vaddr;
1000	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1001		return cpc_write_ffh(cpu, reg, val);
1002	else
1003		return acpi_os_write_memory((acpi_physical_address)reg->address,
1004				val, reg->bit_width);
1005
 1006	switch (reg->bit_width) {
 1007	case 8:
 1008		writeb_relaxed(val, vaddr);
 1009		break;
 1010	case 16:
 1011		writew_relaxed(val, vaddr);
 1012		break;
 1013	case 32:
 1014		writel_relaxed(val, vaddr);
 1015		break;
 1016	case 64:
 1017		writeq_relaxed(val, vaddr);
 1018		break;
 1019	default:
 1020		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
 1021			 reg->bit_width, pcc_ss_id);
 1022		ret_val = -EFAULT;
 1023		break;
 1024	}
1025
1026	return ret_val;
1027}
1028
1029/**
 1030 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1031 * @cpunum: CPU from which to get capabilities info.
1032 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1033 *
1034 * Return: 0 for success with perf_caps populated else -ERRNO.
1035 */
1036int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1037{
1038	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1039	struct cpc_register_resource *highest_reg, *lowest_reg,
1040		*lowest_non_linear_reg, *nominal_reg;
1041	u64 high, low, nom, min_nonlinear;
1042	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1043	struct cppc_pcc_data *pcc_ss_data;
1044	int ret = 0, regs_in_pcc = 0;
1045
1046	if (!cpc_desc || pcc_ss_id < 0) {
1047		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1048		return -ENODEV;
1049	}
1050
1051	pcc_ss_data = pcc_data[pcc_ss_id];
1052	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1053	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1054	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1055	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1056
 1057	/* Are any of the regs PCC? */
1058	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1059		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
1060		regs_in_pcc = 1;
1061		down_write(&pcc_ss_data->pcc_lock);
1062		/* Ring doorbell once to update PCC subspace */
1063		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1064			ret = -EIO;
1065			goto out_err;
1066		}
1067	}
1068
1069	cpc_read(cpunum, highest_reg, &high);
1070	perf_caps->highest_perf = high;
1071
1072	cpc_read(cpunum, lowest_reg, &low);
1073	perf_caps->lowest_perf = low;
1074
1075	cpc_read(cpunum, nominal_reg, &nom);
1076	perf_caps->nominal_perf = nom;
1077
1078	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1079	perf_caps->lowest_nonlinear_perf = min_nonlinear;
1080
1081	if (!high || !low || !nom || !min_nonlinear)
1082		ret = -EFAULT;
1083
1084out_err:
1085	if (regs_in_pcc)
1086		up_write(&pcc_ss_data->pcc_lock);
1087	return ret;
1088}
1089EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
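/*
 * Illustrative usage sketch (not part of this file): a CPUfreq driver
 * would typically query the capabilities once per CPU at init time:
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps))
 *		pr_debug("CPU%d perf range %llu..%llu, nominal %llu\n",
 *			 cpu, caps.lowest_perf, caps.highest_perf,
 *			 caps.nominal_perf);
 */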
1090
1091/**
 1092 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1093 * @cpunum: CPU from which to read counters.
1094 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1095 *
1096 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1097 */
1098int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1099{
1100	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1101	struct cpc_register_resource *delivered_reg, *reference_reg,
1102		*ref_perf_reg, *ctr_wrap_reg;
1103	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1104	struct cppc_pcc_data *pcc_ss_data;
1105	u64 delivered, reference, ref_perf, ctr_wrap_time;
1106	int ret = 0, regs_in_pcc = 0;
1107
1108	if (!cpc_desc || pcc_ss_id < 0) {
1109		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1110		return -ENODEV;
1111	}
1112
1113	pcc_ss_data = pcc_data[pcc_ss_id];
1114	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1115	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1116	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1117	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1118
1119	/*
 1120	 * If the reference perf register is not supported then we should
 1121	 * use the nominal perf value.
1122	 */
1123	if (!CPC_SUPPORTED(ref_perf_reg))
1124		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1125
 1126	/* Are any of the regs PCC? */
1127	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1128		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1129		down_write(&pcc_ss_data->pcc_lock);
1130		regs_in_pcc = 1;
1131		/* Ring doorbell once to update PCC subspace */
1132		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1133			ret = -EIO;
1134			goto out_err;
1135		}
1136	}
1137
1138	cpc_read(cpunum, delivered_reg, &delivered);
1139	cpc_read(cpunum, reference_reg, &reference);
1140	cpc_read(cpunum, ref_perf_reg, &ref_perf);
1141
1142	/*
 1143	 * Per spec, if the optional ctr_wrap_time register is unsupported,
 1144	 * then the performance counters are assumed to never wrap during
 1145	 * the lifetime of the platform.
1146	 */
1147	ctr_wrap_time = (u64)(~((u64)0));
1148	if (CPC_SUPPORTED(ctr_wrap_reg))
1149		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1150
 1151	if (!delivered || !reference || !ref_perf) {
1152		ret = -EFAULT;
1153		goto out_err;
1154	}
1155
1156	perf_fb_ctrs->delivered = delivered;
1157	perf_fb_ctrs->reference = reference;
1158	perf_fb_ctrs->reference_perf = ref_perf;
1159	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1160out_err:
1161	if (regs_in_pcc)
1162		up_write(&pcc_ss_data->pcc_lock);
1163	return ret;
1164}
1165EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
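/*
 * Callers typically sample the counters twice and derive the average
 * delivered performance over the interval, along the lines of this
 * illustrative sketch (fb0 and fb1 are two cppc_perf_fb_ctrs snapshots):
 *
 *	cppc_get_perf_ctrs(cpu, &fb0);
 *	... some time passes ...
 *	cppc_get_perf_ctrs(cpu, &fb1);
 *	delivered_perf = fb0.reference_perf *
 *			 (fb1.delivered - fb0.delivered) /
 *			 (fb1.reference - fb0.reference);
 */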
1166
1167/**
 1168 * cppc_set_perf - Set a CPU's performance controls.
1169 * @cpu: CPU for which to set performance controls.
1170 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1171 *
1172 * Return: 0 for success, -ERRNO otherwise.
1173 */
1174int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1175{
1176	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1177	struct cpc_register_resource *desired_reg;
1178	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1179	struct cppc_pcc_data *pcc_ss_data;
1180	int ret = 0;
1181
1182	if (!cpc_desc || pcc_ss_id < 0) {
1183		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1184		return -ENODEV;
1185	}
1186
1187	pcc_ss_data = pcc_data[pcc_ss_id];
1188	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1189
1190	/*
1191	 * This is Phase-I where we want to write to CPC registers
1192	 * -> We want all CPUs to be able to execute this phase in parallel
1193	 *
 1194	 * Since the read_lock can be acquired by multiple CPUs simultaneously,
 1195	 * we achieve that goal here.
1196	 */
1197	if (CPC_IN_PCC(desired_reg)) {
1198		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1199		if (pcc_ss_data->platform_owns_pcc) {
1200			ret = check_pcc_chan(pcc_ss_id, false);
1201			if (ret) {
1202				up_read(&pcc_ss_data->pcc_lock);
1203				return ret;
1204			}
1205		}
1206		/*
1207		 * Update the pending_write to make sure a PCC CMD_READ will not
1208		 * arrive and steal the channel during the switch to write lock
1209		 */
1210		pcc_ss_data->pending_pcc_write_cmd = true;
1211		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1212		cpc_desc->write_cmd_status = 0;
1213	}
1214
1215	/*
1216	 * Skip writing MIN/MAX until Linux knows how to come up with
1217	 * useful values.
1218	 */
1219	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1220
1221	if (CPC_IN_PCC(desired_reg))
1222		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
1223	/*
1224	 * This is Phase-II where we transfer the ownership of PCC to Platform
1225	 *
 1226	 * Short summary: if we think of a group of cppc_set_perf requests
 1227	 * that happened in a short overlapping interval, the last CPU to
 1228	 * come out of Phase-I will enter Phase-II and ring the doorbell.
1229	 *
1230	 * We have the following requirements for Phase-II:
1231	 *     1. We want to execute Phase-II only when there are no CPUs
1232	 * currently executing in Phase-I
1233	 *     2. Once we start Phase-II we want to avoid all other CPUs from
1234	 * entering Phase-I.
1235	 *     3. We want only one CPU among all those who went through Phase-I
 1236	 * to run Phase-II.
1237	 *
 1238	 * If write_trylock fails to get the lock and doesn't transfer the
 1239	 * PCC ownership to the platform, then one of the following is TRUE:
 1240	 *     1. There is at least one CPU in Phase-I which will later execute
 1241	 * write_trylock, so the CPUs in Phase-I will be responsible for
 1242	 * executing Phase-II.
 1243	 *     2. Some other CPU has beaten this CPU to successfully execute the
 1244	 * write_trylock and has already acquired the write_lock. We know for a
 1245	 * fact that the other CPU acquiring the write_lock couldn't have
 1246	 * happened before this CPU's Phase-I, as we held the read_lock.
 1247	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
 1248	 * down_write, in which case send_pcc_cmd will check for pending
 1249	 * CMD_WRITE commands by checking pending_pcc_write_cmd.
 1250	 * So in all cases, this CPU knows that its request will be delivered
 1251	 * by another CPU and it can return.
1253	 *
 1254	 * After getting the down_write we still need to check for
 1255	 * pending_pcc_write_cmd to take care of the following scenario:
 1256	 *    The thread running this code could be scheduled out between
 1257	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
 1258	 * could have delivered the request to the platform by triggering the
 1259	 * doorbell and transferred the ownership of PCC to the platform. So
 1260	 * this avoids triggering an unnecessary doorbell and, more importantly,
 1261	 * before triggering the doorbell it makes sure that the PCC channel
 1262	 * ownership is still with OSPM.
 1263	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
 1264	 * there was a PCC CMD_READ waiting on down_write and it steals the lock
 1265	 * before the PCC CMD_WRITE is completed. send_pcc_cmd checks for this
 1266	 * case during a CMD_READ and, if there are pending writes, it delivers
 1267	 * the write command before servicing the read command.
 1268	 */
1269	if (CPC_IN_PCC(desired_reg)) {
1270		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1271			/* Update only if there are pending write commands */
1272			if (pcc_ss_data->pending_pcc_write_cmd)
1273				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1274			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
1275		} else
1276			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1277			wait_event(pcc_ss_data->pcc_write_wait_q,
1278				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1279
1280		/* send_pcc_cmd updates the status in case of failure */
1281		ret = cpc_desc->write_cmd_status;
1282	}
1283	return ret;
1284}
1285EXPORT_SYMBOL_GPL(cppc_set_perf);
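/*
 * Illustrative usage sketch (not part of this file): a CPUfreq driver's
 * frequency-change path boils down to:
 *
 *	struct cppc_perf_ctrls ctrls = { .desired_perf = target_perf };
 *
 *	ret = cppc_set_perf(policy->cpu, &ctrls);
 */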
1286
1287/**
1288 * cppc_get_transition_latency - returns frequency transition latency in ns
1289 *
 1290 * ACPI CPPC does not explicitly define how a platform can specify the
 1291 * transition latency for performance change requests. The closest we have
1292 * is the timing information from the PCCT tables which provides the info
1293 * on the number and frequency of PCC commands the platform can handle.
1294 */
1295unsigned int cppc_get_transition_latency(int cpu_num)
1296{
1297	/*
1298	 * Expected transition latency is based on the PCCT timing values
 1299	 * Below are the definitions from the ACPI spec:
1300	 * pcc_nominal- Expected latency to process a command, in microseconds
1301	 * pcc_mpar   - The maximum number of periodic requests that the subspace
1302	 *              channel can support, reported in commands per minute. 0
1303	 *              indicates no limitation.
1304	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1305	 *              completion of a command before issuing the next command,
1306	 *              in microseconds.
1307	 */
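	/*
	 * Worked example with illustrative values: pcc_mpar = 6000
	 * commands/min limits requests to one per 60 * 1e9 / 6000 =
	 * 10,000,000 ns, i.e. at most one perf change every 10ms.
	 */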
1308	unsigned int latency_ns = 0;
1309	struct cpc_desc *cpc_desc;
1310	struct cpc_register_resource *desired_reg;
1311	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1312	struct cppc_pcc_data *pcc_ss_data;
1313
1314	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1315	if (!cpc_desc)
1316		return CPUFREQ_ETERNAL;
1317
1318	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1319	if (!CPC_IN_PCC(desired_reg))
1320		return CPUFREQ_ETERNAL;
1321
1322	if (pcc_ss_id < 0)
1323		return CPUFREQ_ETERNAL;
1324
1325	pcc_ss_data = pcc_data[pcc_ss_id];
1326	if (pcc_ss_data->pcc_mpar)
1327		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1328
1329	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1330	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1331
1332	return latency_ns;
1333}
1334EXPORT_SYMBOL_GPL(cppc_get_transition_latency);