// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "core.h"

/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	NULL
};
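
/*
 * The position of each mode in this table matches its bit position in the
 * LPM_EN register: pmc_core_get_low_power_modes() below tests BIT(mode)
 * against the enable register, so e.g. "S0i2.0" at index 0 is enabled when
 * bit 0 is set.
 */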

/* PKGC MSRs are common across Intel Core SoCs */
const struct pmc_bit_map msr_map[] = {
	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
	{}
};

static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
	return readl(pmcdev->regbase + reg_offset);
}

static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
				      u32 val)
{
	writel(val, pmcdev->regbase + reg_offset);
}

static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
{
	return (u64)value * pmcdev->map->slp_s0_res_counter_step;
}
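
/*
 * The SLP_S0 residency register counts in hardware-specific ticks;
 * slp_s0_res_counter_step comes from the per-SoC pmc_reg_map and scales the
 * raw value to microseconds, matching the "slp_s0_residency_usec" debugfs
 * node registered below.
 */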

static int set_etr3(struct pmc_dev *pmcdev)
{
	const struct pmc_reg_map *map = pmcdev->map;
	u32 reg;
	int err;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	mutex_lock(&pmcdev->lock);

	/* check if CF9 is locked */
	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
	if (reg & ETR3_CF9LOCK) {
		err = -EACCES;
		goto out_unlock;
	}

	/* write CF9 global reset bit */
	reg |= ETR3_CF9GR;
	pmc_core_reg_write(pmcdev, map->etr3_offset, reg);

	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
	if (!(reg & ETR3_CF9GR)) {
		err = -EIO;
		goto out_unlock;
	}

	err = 0;

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}

static umode_t etr3_is_visible(struct kobject *kobj,
				struct attribute *attr,
				int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_reg_map *map = pmcdev->map;
	u32 reg;

	mutex_lock(&pmcdev->lock);
	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
	mutex_unlock(&pmcdev->lock);

	return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}

static ssize_t etr3_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_reg_map *map = pmcdev->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	mutex_lock(&pmcdev->lock);

	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
	reg &= ETR3_CF9GR | ETR3_CF9LOCK;

	mutex_unlock(&pmcdev->lock);

	return sysfs_emit(buf, "0x%08x", reg);
}

static ssize_t etr3_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	int err;
	u32 reg;

	err = kstrtouint(buf, 16, &reg);
	if (err)
		return err;

	/* allow only CF9 writes */
	if (reg != ETR3_CF9GR)
		return -EINVAL;

	err = set_etr3(pmcdev);
	if (err)
		return err;

	return len;
}

static DEVICE_ATTR_RW(etr3);
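
/*
 * Usage sketch (the sysfs path is illustrative and depends on how the
 * platform device is instantiated):
 *
 *   # cat /sys/devices/platform/.../etr3           - dump CF9GR/CF9LOCK bits
 *   # echo <ETR3_CF9GR in hex> > .../etr3          - request a CF9 global reset
 *
 * Any value other than ETR3_CF9GR is rejected with -EINVAL, and the write
 * fails with -EACCES while CF9 is locked.
 */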

static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};

static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc_dev *pmcdev = data;
	const struct pmc_reg_map *map = pmcdev->map;
	u32 value;

	value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(pmcdev, value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
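
/*
 * The attribute above backs the "slp_s0_residency_usec" node created in
 * pmc_core_dbgfs_register(), e.g.:
 *
 *   # cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec
 */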

static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
{
	u32 value;

	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
	return value & BIT(pmcdev->map->pm_read_disable_bit);
}

static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
				   struct seq_file *s)
{
	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
	const struct pmc_bit_map *map;
	int offset = pmcdev->map->slps0_dbg_offset;
	u32 data;

	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmcdev, offset);
		offset += 4;
		while (map->name) {
			if (dev)
				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
					map->name,
					data & map->bit_mask ? "Yes" : "No");
			if (s)
				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
					   map->name,
					   data & map->bit_mask ? "Yes" : "No");
			++map;
		}
		++maps;
	}
}

static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
	int idx;

	for (idx = 0; maps[idx]; idx++)
		;/* Nothing */

	return idx;
}

static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
				 struct seq_file *s, u32 offset,
				 const char *str,
				 const struct pmc_bit_map **maps)
{
	int index, idx, len = 32, bit_mask, arr_size;
	u32 *lpm_regs;

	arr_size = pmc_core_lpm_get_arr_size(maps);
	lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
	if (!lpm_regs)
		return;

	for (index = 0; index < arr_size; index++) {
		lpm_regs[index] = pmc_core_reg_read(pmcdev, offset);
		offset += 4;
	}

	for (idx = 0; idx < arr_size; idx++) {
		if (dev)
			dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
				lpm_regs[idx]);
		if (s)
			seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
				   lpm_regs[idx]);
		for (index = 0; index < len && maps[idx][index].name; index++) {
			bit_mask = maps[idx][index].bit_mask;
			if (dev)
				dev_info(dev, "%-30s %-30d\n",
					maps[idx][index].name,
					lpm_regs[idx] & bit_mask ? 1 : 0);
			if (s)
				seq_printf(s, "%-30s %-30d\n",
					   maps[idx][index].name,
					   lpm_regs[idx] & bit_mask ? 1 : 0);
		}
	}

	kfree(lpm_regs);
}

static bool slps0_dbg_latch;

static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
{
	return readb(pmcdev->regbase + offset);
}

static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
				 u8 pf_reg, const struct pmc_bit_map **pf_map)
{
	seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
		   ip, pf_map[idx][index].name,
		   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}

static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
	u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
	int index, iter, idx, ip = 0;

	iter = pmcdev->map->ppfear0_offset;

	for (index = 0; index < pmcdev->map->ppfear_buckets &&
	     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
		pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);

	for (idx = 0; maps[idx]; idx++) {
		for (index = 0; maps[idx][index].name &&
		     index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
			pmc_core_display_map(s, index, idx, ip,
					     pf_regs[index / 8], maps);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
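
/*
 * PPFEAR layout as consumed above: each 8-bit bucket reports eight IPs, so
 * IP number "ip" is bit (ip % 8) of bucket pf_regs[ip / 8]. A set bit is
 * reported by pmc_core_display_map() as "Off" (power gated).
 */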

/* Returns the MTPMC link status; 0 means the mailbox is ready for a new message */
static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
{
	u32 value;

	value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}

static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
{
	u32 dest;
	int timeout;

	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
		if (pmc_core_mtpmc_link_status(pmcdev) == 0)
			break;
		msleep(5);
	}

	if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
		return -EBUSY;

	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
	pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
	return 0;
}
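
/*
 * Mailbox sequence used by the MPHY and PLL dumps below: write the target
 * XRAM address to MTPMC with pmc_core_send_msg(), give the PMC ~10 ms to
 * service the request, then read the result back from MFPMC. The MSG_FULL
 * status bit polled above gates new requests.
 */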

static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
	u32 mphy_core_reg_low, mphy_core_reg_high;
	u32 val_low, val_high;
	int index, err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

	if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; index < 8 && map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_low ? "Not power gated" :
			   "Power gated");
	}

	for (index = 8; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_high ? "Not power gated" :
			   "Power gated");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);

static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->pll_sts;
	u32 mphy_common_reg, val;
	int index, err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val ? "Active" : "Idle");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);

int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
{
	const struct pmc_reg_map *map = pmcdev->map;
	u32 reg;
	int err = 0;

	mutex_lock(&pmcdev->lock);

	if (value > map->ltr_ignore_max) {
		err = -EINVAL;
		goto out_unlock;
	}

	reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
	reg |= BIT(value);
	pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);

out_unlock:
	mutex_unlock(&pmcdev->lock);

	return err;
}

static ssize_t pmc_core_ltr_ignore_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	u32 buf_size, value;
	int err;

	buf_size = min_t(u32, count, 64);

	err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
	if (err)
		return err;

	err = pmc_core_send_ltr_ignore(pmcdev, value);

	return err == 0 ? count : err;
}

static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
}

static const struct file_operations pmc_core_ltr_ignore_ops = {
	.open           = pmc_core_ltr_ignore_open,
	.read           = seq_read,
	.write          = pmc_core_ltr_ignore_write,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	const struct pmc_reg_map *map = pmcdev->map;
	u32 fd;

	mutex_lock(&pmcdev->lock);

	if (!reset && !slps0_dbg_latch)
		goto out_unlock;

	fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = false;

out_unlock:
	mutex_unlock(&pmcdev->lock);
}

static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;

	pmc_core_slps0_dbg_latch(pmcdev, false);
	pmc_core_slps0_display(pmcdev, NULL, s);
	pmc_core_slps0_dbg_latch(pmcdev, true);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);

static u32 convert_ltr_scale(u32 val)
{
	/*
	 * As per PCIe specification supporting document
	 * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
	 * Tolerance Reporting data payload is encoded in a
	 * 3-bit scale and 10-bit value field. Values are
	 * multiplied by the indicated scale to yield an absolute time
	 * value, expressible in a range from 1 nanosecond to
	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
	 *
	 * scale encoding is as follows:
	 *
	 * ----------------------------------------------
	 * |scale factor	|	Multiplier (ns)	|
	 * ----------------------------------------------
	 * |	0		|	1		|
	 * |	1		|	32		|
	 * |	2		|	1024		|
	 * |	3		|	32768		|
	 * |	4		|	1048576		|
	 * |	5		|	33554432	|
	 * |	6		|	Invalid		|
	 * |	7		|	Invalid		|
	 * ----------------------------------------------
	 */
	if (val > 5) {
		pr_warn("Invalid LTR scale factor.\n");
		return 0;
	}

	return 1U << (5 * val);
}
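
/*
 * Worked example of the encoding above: a requirement with scale = 2 and
 * value = 100 decodes to 100 * 1024 = 102400 ns, i.e.
 * val * (1U << (5 * scale)).
 */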

static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
	u32 ltr_raw_data, scale, val;
	u16 snoop_ltr, nonsnoop_ltr;
	int index;

	for (index = 0; map[index].name; index++) {
		decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
		ltr_raw_data = pmc_core_reg_read(pmcdev,
						 map[index].bit_mask);
		snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
		nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

		if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
			scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
			val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
			decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
		}

		if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
			scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
			val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
			decoded_snoop_ltr = val * convert_ltr_scale(scale);
		}

		seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
			   map[index].name, ltr_raw_data,
			   decoded_non_snoop_ltr,
			   decoded_snoop_ltr);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
				       const int lpm_adj_x2)
{
	u64 lpm_res = pmc_core_reg_read(pmcdev, offset);

	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
}
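
/*
 * lpm_res_counter_step_x2 is the per-tick residency step stored doubled so
 * that half-unit steps stay integral; multiplying first and then halving
 * (assuming GET_X2_COUNTER() divides by two, as the _x2 naming suggests)
 * avoids losing the fractional half.
 */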

static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
	u32 offset = pmcdev->map->lpm_residency_offset;
	int i, mode;

	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");

	pmc_for_each_mode(i, mode, pmcdev) {
		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
			   adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);

static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	u32 offset = pmcdev->map->lpm_status_offset;

	pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);

static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	u32 offset = pmcdev->map->lpm_live_status_offset;

	pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);

static void pmc_core_substate_req_header_show(struct seq_file *s)
{
	struct pmc_dev *pmcdev = s->private;
	int i, mode;

	seq_printf(s, "%30s |", "Element");
	pmc_for_each_mode(i, mode, pmcdev)
		seq_printf(s, " %9s |", pmc_lpm_modes[mode]);

	seq_printf(s, " %9s |\n", "Status");
}

static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	const struct pmc_bit_map *map;
	const int num_maps = pmcdev->map->lpm_num_maps;
	u32 sts_offset = pmcdev->map->lpm_status_offset;
	u32 *lpm_req_regs = pmcdev->lpm_req_regs;
	int mp;

	/* Display the header */
	pmc_core_substate_req_header_show(s);

	/* Loop over maps */
	for (mp = 0; mp < num_maps; mp++) {
		u32 req_mask = 0;
		u32 lpm_status;
		int mode, idx, i, len = 32;

		/*
		 * Capture the requirements and create a mask so that we only
		 * show an element if it's required for at least one of the
		 * enabled low power modes
		 */
		pmc_for_each_mode(idx, mode, pmcdev)
			req_mask |= lpm_req_regs[mp + (mode * num_maps)];

		/* Get the last latched status for this map */
		lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));

		/* Loop over elements in this map */
		map = maps[mp];
		for (i = 0; i < len && map[i].name; i++) {
			u32 bit_mask = map[i].bit_mask;

			if (!(bit_mask & req_mask))
				/*
				 * Not required for any enabled states
				 * so don't display
				 */
				continue;

			/* Display the element name in the first column */
			seq_printf(s, "%30s |", map[i].name);

			/* Loop over the enabled states and display if required */
			pmc_for_each_mode(idx, mode, pmcdev) {
				if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
					seq_printf(s, " %9s |",
						   "Required");
				else
					seq_printf(s, " %9s |", " ");
			}

			/* In Status column, show the last captured state of this agent */
			if (lpm_status & bit_mask)
				seq_printf(s, " %9s |", "Yes");
			else
				seq_printf(s, " %9s |", " ");

			seq_puts(s, "\n");
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);

static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	bool c10;
	u32 reg;
	int idx, mode;

	reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
	if (reg & LPM_STS_LATCH_MODE) {
		seq_puts(s, "c10");
		c10 = false;
	} else {
		seq_puts(s, "[c10]");
		c10 = true;
	}

	pmc_for_each_mode(idx, mode, pmcdev) {
		if ((BIT(mode) & reg) && !c10)
			seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
		else
			seq_printf(s, " %s", pmc_lpm_modes[mode]);
	}

	seq_puts(s, " clear\n");

	return 0;
}

static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	bool clear = false, c10 = false;
	unsigned char buf[8];
	int idx, m, mode;
	u32 reg;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	/*
	 * Allowed strings are:
	 *	Any enabled substate, e.g. 'S0i2.0'
	 *	'c10'
	 *	'clear'
	 */
	mode = sysfs_match_string(pmc_lpm_modes, buf);

	/* Check string matches enabled mode */
	pmc_for_each_mode(idx, m, pmcdev)
		if (mode == m)
			break;

	if (mode != m || mode < 0) {
		if (sysfs_streq(buf, "clear"))
			clear = true;
		else if (sysfs_streq(buf, "c10"))
			c10 = true;
		else
			return -EINVAL;
	}

	if (clear) {
		mutex_lock(&pmcdev->lock);

		reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	if (c10) {
		mutex_lock(&pmcdev->lock);

		reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	mutex_lock(&pmcdev->lock);
	pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
	mutex_unlock(&pmcdev->lock);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
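
/*
 * Usage sketch for the "lpm_latch_mode" debugfs node registered below:
 *
 *   # echo S0i3.2 > /sys/kernel/debug/pmc_core/lpm_latch_mode
 *   # echo c10    > /sys/kernel/debug/pmc_core/lpm_latch_mode
 *   # echo clear  > /sys/kernel/debug/pmc_core/lpm_latch_mode
 *
 * Reading the node back shows the current selection in brackets.
 */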

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->msr_sts;
	u64 pcstate_count;
	int index;

	for (index = 0; map[index].name; index++) {
		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
			continue;

		pcstate_count *= 1000;
		do_div(pcstate_count, tsc_khz);
		seq_printf(s, "%-8s : %llu\n", map[index].name,
			   pcstate_count);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
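
/*
 * The package C-state residency MSRs tick at the TSC rate, so the
 * conversion above (count * 1000 / tsc_khz) yields microseconds; e.g.
 * 2,400,000 ticks with tsc_khz = 2400000 prints as 1000 (1 ms).
 */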

static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
	int i, j;

	if (!lpm_pri)
		return false;
	/*
	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
	 * In a 32 bit register this allows for describing 8 modes. Store the
	 * levels and look for values out of range.
	 */
	for (i = 0; i < 8; i++) {
		int level = lpm_pri & GENMASK(3, 0);

		if (level >= LPM_MAX_NUM_MODES)
			return false;

		mode_order[i] = level;
		lpm_pri >>= 4;
	}

	/* Check that we have unique values */
	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
			if (mode_order[i] == mode_order[j])
				return false;

	return true;
}
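
/*
 * Example decode: lpm_pri = 0x76543210 places priority level 0 for mode 0
 * in the lowest nibble up through level 7 for mode 7 in the highest, so
 * mode_order = {0, 1, 2, 3, 4, 5, 6, 7} and the register is accepted as a
 * valid, unique ordering.
 */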

static void pmc_core_get_low_power_modes(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	int mode, i, p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmcdev->map->lpm_num_maps)
		return;

	lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
	/*
	 * For MTL, bit 31 is not an LPM mode but an enable bit. The lower
	 * byte is enough to cover the number of LPM modes on all platforms,
	 * so mask off the upper three bytes.
	 */
	pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);

	/* Read 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_priority_offset);

	/*
	 * If lpm_pri value passes verification, then override the default
	 * modes here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order))
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	else
		dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		int mode = pri_order[p];

		if (!(BIT(mode) & lpm_en))
			continue;

		pmcdev->lpm_en_modes[i++] = mode;
	}
}

static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
	struct dentry *dir;

	dir = debugfs_create_dir("pmc_core", NULL);
	pmcdev->dbgfs_dir = dir;

	debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
			    &pmc_core_dev_state);

	if (pmcdev->map->pfear_sts)
		debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
				    pmcdev, &pmc_core_ppfear_fops);

	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
			    &pmc_core_ltr_ignore_ops);

	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

	debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
			    &pmc_core_pkgc_fops);

	if (pmcdev->map->pll_sts)
		debugfs_create_file("pll_status", 0444, dir, pmcdev,
				    &pmc_core_pll_fops);

	if (pmcdev->map->mphy_sts)
		debugfs_create_file("mphy_core_lanes_power_gating_status",
				    0444, dir, pmcdev,
				    &pmc_core_mphy_pg_fops);

	if (pmcdev->map->slps0_dbg_maps) {
		debugfs_create_file("slp_s0_debug_status", 0444,
				    dir, pmcdev,
				    &pmc_core_slps0_dbg_fops);

		debugfs_create_bool("slp_s0_dbg_latch", 0644,
				    dir, &slps0_dbg_latch);
	}

	if (pmcdev->map->lpm_en_offset) {
		debugfs_create_file("substate_residencies", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_res_fops);
	}

	if (pmcdev->map->lpm_status_offset) {
		debugfs_create_file("substate_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_sts_regs_fops);
		debugfs_create_file("substate_live_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_l_sts_regs_fops);
		debugfs_create_file("lpm_latch_mode", 0644,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_lpm_latch_mode_fops);
	}

	if (pmcdev->lpm_req_regs) {
		debugfs_create_file("substate_requirements", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_req_regs_fops);
	}
}

static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,	icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		mtl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	mtl_core_init),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
	{ }
};

/*
 * This quirk can be used on platforms where the BIOS forces the 24 MHz
 * crystal to shut down before the PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}

static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
{
	u32 value;

	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
	/* 24MHz Crystal Shutdown Qualification Disable */
	value |= SPT_PMC_VRIC1_XTALSDQDIS;
	/* Low Voltage Mode Enable */
	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
	pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
	{
	.callback = quirk_xtal_ignore,
	.ident = "HP Elite x2 1013 G3",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};

static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
{
	dmi_check_system(pmc_core_dmi_table);

	if (xtal_ignore)
		pmc_core_xtal_ignore(pmcdev);
}
1087static int pmc_core_probe(struct platform_device *pdev)
1088{
1089	static bool device_initialized;
1090	struct pmc_dev *pmcdev;
1091	const struct x86_cpu_id *cpu_id;
1092	void (*core_init)(struct pmc_dev *pmcdev);
1093	u64 slp_s0_addr;
1094
1095	if (device_initialized)
1096		return -ENODEV;
1097
1098	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
1099	if (!pmcdev)
1100		return -ENOMEM;
1101
1102	platform_set_drvdata(pdev, pmcdev);
1103	pmcdev->pdev = pdev;
1104
1105	cpu_id = x86_match_cpu(intel_pmc_core_ids);
1106	if (!cpu_id)
1107		return -ENODEV;
1108
1109	core_init = (void  (*)(struct pmc_dev *))cpu_id->driver_data;
1110
1111	/*
1112	 * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
1113	 * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
1114	 * in this case.
1115	 */
1116	if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
1117		core_init = cnp_core_init;
1118
1119	mutex_init(&pmcdev->lock);
1120	core_init(pmcdev);
1121
1122
1123	if (lpit_read_residency_count_address(&slp_s0_addr)) {
1124		pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
1125
1126		if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
1127			return -ENODEV;
1128	} else {
1129		pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
1130	}
1131
1132	pmcdev->regbase = ioremap(pmcdev->base_addr,
1133				  pmcdev->map->regmap_length);
1134	if (!pmcdev->regbase)
1135		return -ENOMEM;
1136
1137	if (pmcdev->core_configure)
1138		pmcdev->core_configure(pmcdev);
1139
1140	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
1141	pmc_core_get_low_power_modes(pdev);
1142	pmc_core_do_dmi_quirks(pmcdev);
1143
1144	pmc_core_dbgfs_register(pmcdev);
1145
1146	device_initialized = true;
1147	dev_info(&pdev->dev, " initialized\n");
1148
1149	return 0;
1150}

static int pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&pmcdev->lock);
	iounmap(pmcdev->regbase);
	return 0;
}

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
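
/*
 * The parameter is writable at run time, e.g. (assuming the standard module
 * sysfs layout):
 *
 *   # echo Y > /sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures
 */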

static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);

	pmcdev->check_counters = false;

	/* No warnings on S0ix failures */
	if (!warn_on_s0ix_failures)
		return 0;
	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PC10 residency for checking later */
	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
		return -EIO;

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
		return -EIO;

	pmcdev->check_counters = true;
	return 0;
}

static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
	u64 pc10_counter;

	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
		return false;

	if (pc10_counter == pmcdev->pc10_counter)
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
		return false;

	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}

static __maybe_unused int pmc_core_resume(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	int offset = pmcdev->map->lpm_status_offset;

	if (!pmcdev->check_counters)
		return 0;

	if (!pmc_core_is_s0ix_failed(pmcdev))
		return 0;

	if (pmc_core_is_pc10_failed(pmcdev)) {
		/* S0ix failed because of PC10 entry failure */
		dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
			 pmcdev->pc10_counter);
		return 0;
	}
	/* The really interesting case: S0ix failed, so let's ask the PMC why. */
	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
		 pmcdev->s0ix_counter);
	if (pmcdev->map->slps0_dbg_maps)
		pmc_core_slps0_display(pmcdev, dev, NULL);
	if (pmcdev->map->lpm_sts)
		pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);

	return 0;
}

static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");