   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Intel Core SoC Power Management Controller Driver
   4 *
   5 * Copyright (c) 2016, Intel Corporation.
   6 * All Rights Reserved.
   7 *
   8 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
   9 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
  10 */
  11
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13
  14#include <linux/acpi.h>
  15#include <linux/bitfield.h>
  16#include <linux/debugfs.h>
  17#include <linux/delay.h>
  18#include <linux/dmi.h>
  19#include <linux/io.h>
  20#include <linux/module.h>
  21#include <linux/pci.h>
  22#include <linux/platform_device.h>
  23#include <linux/suspend.h>
  24#include <linux/uaccess.h>
  25
  26#include <asm/cpu_device_id.h>
  27#include <asm/intel-family.h>
  28#include <asm/msr.h>
  29#include <asm/tsc.h>
  30
  31#include "intel_pmc_core.h"
  32
  33static struct pmc_dev pmc;
  34
  35/* PKGC MSRs are common across Intel Core SoCs */
  36static const struct pmc_bit_map msr_map[] = {
  37	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
  38	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
  39	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
  40	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
  41	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
  42	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
  43	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
  44	{}
  45};
  46
  47static const struct pmc_bit_map spt_pll_map[] = {
  48	{"MIPI PLL",			SPT_PMC_BIT_MPHY_CMN_LANE0},
  49	{"GEN2 USB2PCIE2 PLL",		SPT_PMC_BIT_MPHY_CMN_LANE1},
  50	{"DMIPCIE3 PLL",		SPT_PMC_BIT_MPHY_CMN_LANE2},
  51	{"SATA PLL",			SPT_PMC_BIT_MPHY_CMN_LANE3},
  52	{},
  53};
  54
  55static const struct pmc_bit_map spt_mphy_map[] = {
  56	{"MPHY CORE LANE 0",           SPT_PMC_BIT_MPHY_LANE0},
  57	{"MPHY CORE LANE 1",           SPT_PMC_BIT_MPHY_LANE1},
  58	{"MPHY CORE LANE 2",           SPT_PMC_BIT_MPHY_LANE2},
  59	{"MPHY CORE LANE 3",           SPT_PMC_BIT_MPHY_LANE3},
  60	{"MPHY CORE LANE 4",           SPT_PMC_BIT_MPHY_LANE4},
  61	{"MPHY CORE LANE 5",           SPT_PMC_BIT_MPHY_LANE5},
  62	{"MPHY CORE LANE 6",           SPT_PMC_BIT_MPHY_LANE6},
  63	{"MPHY CORE LANE 7",           SPT_PMC_BIT_MPHY_LANE7},
  64	{"MPHY CORE LANE 8",           SPT_PMC_BIT_MPHY_LANE8},
  65	{"MPHY CORE LANE 9",           SPT_PMC_BIT_MPHY_LANE9},
  66	{"MPHY CORE LANE 10",          SPT_PMC_BIT_MPHY_LANE10},
  67	{"MPHY CORE LANE 11",          SPT_PMC_BIT_MPHY_LANE11},
  68	{"MPHY CORE LANE 12",          SPT_PMC_BIT_MPHY_LANE12},
  69	{"MPHY CORE LANE 13",          SPT_PMC_BIT_MPHY_LANE13},
  70	{"MPHY CORE LANE 14",          SPT_PMC_BIT_MPHY_LANE14},
  71	{"MPHY CORE LANE 15",          SPT_PMC_BIT_MPHY_LANE15},
  72	{},
  73};
  74
  75static const struct pmc_bit_map spt_pfear_map[] = {
  76	{"PMC",				SPT_PMC_BIT_PMC},
  77	{"OPI-DMI",			SPT_PMC_BIT_OPI},
  78	{"SPI / eSPI",			SPT_PMC_BIT_SPI},
  79	{"XHCI",			SPT_PMC_BIT_XHCI},
  80	{"SPA",				SPT_PMC_BIT_SPA},
  81	{"SPB",				SPT_PMC_BIT_SPB},
  82	{"SPC",				SPT_PMC_BIT_SPC},
  83	{"GBE",				SPT_PMC_BIT_GBE},
  84	{"SATA",			SPT_PMC_BIT_SATA},
  85	{"HDA-PGD0",			SPT_PMC_BIT_HDA_PGD0},
  86	{"HDA-PGD1",			SPT_PMC_BIT_HDA_PGD1},
  87	{"HDA-PGD2",			SPT_PMC_BIT_HDA_PGD2},
  88	{"HDA-PGD3",			SPT_PMC_BIT_HDA_PGD3},
  89	{"RSVD",			SPT_PMC_BIT_RSVD_0B},
  90	{"LPSS",			SPT_PMC_BIT_LPSS},
  91	{"LPC",				SPT_PMC_BIT_LPC},
  92	{"SMB",				SPT_PMC_BIT_SMB},
  93	{"ISH",				SPT_PMC_BIT_ISH},
  94	{"P2SB",			SPT_PMC_BIT_P2SB},
  95	{"DFX",				SPT_PMC_BIT_DFX},
  96	{"SCC",				SPT_PMC_BIT_SCC},
  97	{"RSVD",			SPT_PMC_BIT_RSVD_0C},
  98	{"FUSE",			SPT_PMC_BIT_FUSE},
  99	{"CAMERA",			SPT_PMC_BIT_CAMREA},
 100	{"RSVD",			SPT_PMC_BIT_RSVD_0D},
 101	{"USB3-OTG",			SPT_PMC_BIT_USB3_OTG},
 102	{"EXI",				SPT_PMC_BIT_EXI},
 103	{"CSE",				SPT_PMC_BIT_CSE},
 104	{"CSME_KVM",			SPT_PMC_BIT_CSME_KVM},
 105	{"CSME_PMT",			SPT_PMC_BIT_CSME_PMT},
 106	{"CSME_CLINK",			SPT_PMC_BIT_CSME_CLINK},
 107	{"CSME_PTIO",			SPT_PMC_BIT_CSME_PTIO},
 108	{"CSME_USBR",			SPT_PMC_BIT_CSME_USBR},
 109	{"CSME_SUSRAM",			SPT_PMC_BIT_CSME_SUSRAM},
 110	{"CSME_SMT",			SPT_PMC_BIT_CSME_SMT},
 111	{"RSVD",			SPT_PMC_BIT_RSVD_1A},
 112	{"CSME_SMS2",			SPT_PMC_BIT_CSME_SMS2},
 113	{"CSME_SMS1",			SPT_PMC_BIT_CSME_SMS1},
 114	{"CSME_RTC",			SPT_PMC_BIT_CSME_RTC},
 115	{"CSME_PSF",			SPT_PMC_BIT_CSME_PSF},
 116	{},
 117};
 118
 119static const struct pmc_bit_map spt_ltr_show_map[] = {
 120	{"SOUTHPORT_A",		SPT_PMC_LTR_SPA},
 121	{"SOUTHPORT_B",		SPT_PMC_LTR_SPB},
 122	{"SATA",		SPT_PMC_LTR_SATA},
 123	{"GIGABIT_ETHERNET",	SPT_PMC_LTR_GBE},
 124	{"XHCI",		SPT_PMC_LTR_XHCI},
 125	{"Reserved",		SPT_PMC_LTR_RESERVED},
 126	{"ME",			SPT_PMC_LTR_ME},
 127	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
 128	{"EVA",			SPT_PMC_LTR_EVA},
 129	{"SOUTHPORT_C",		SPT_PMC_LTR_SPC},
 130	{"HD_AUDIO",		SPT_PMC_LTR_AZ},
 131	{"LPSS",		SPT_PMC_LTR_LPSS},
 132	{"SOUTHPORT_D",		SPT_PMC_LTR_SPD},
 133	{"SOUTHPORT_E",		SPT_PMC_LTR_SPE},
 134	{"CAMERA",		SPT_PMC_LTR_CAM},
 135	{"ESPI",		SPT_PMC_LTR_ESPI},
 136	{"SCC",			SPT_PMC_LTR_SCC},
 137	{"ISH",			SPT_PMC_LTR_ISH},
 138	/* Below two cannot be used for LTR_IGNORE */
 139	{"CURRENT_PLATFORM",	SPT_PMC_LTR_CUR_PLT},
 140	{"AGGREGATED_SYSTEM",	SPT_PMC_LTR_CUR_ASLT},
 141	{}
 142};
 143
 144static const struct pmc_reg_map spt_reg_map = {
 145	.pfear_sts = spt_pfear_map,
 146	.mphy_sts = spt_mphy_map,
 147	.pll_sts = spt_pll_map,
 148	.ltr_show_sts = spt_ltr_show_map,
 149	.msr_sts = msr_map,
 150	.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
 151	.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
 152	.regmap_length = SPT_PMC_MMIO_REG_LEN,
 153	.ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
 154	.ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
 155	.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
 156	.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
 157	.ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
 158	.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
 159};
 160
 161/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
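     /*
      * Note (annotation): entries are listed eight per PPFEAR status byte;
      * the blank lines below mark the byte boundaries consumed by
      * pmc_core_ppfear_show().
      */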
 162static const struct pmc_bit_map cnp_pfear_map[] = {
 163	{"PMC",                 BIT(0)},
 164	{"OPI-DMI",             BIT(1)},
 165	{"SPI/eSPI",            BIT(2)},
 166	{"XHCI",                BIT(3)},
 167	{"SPA",                 BIT(4)},
 168	{"SPB",                 BIT(5)},
 169	{"SPC",                 BIT(6)},
 170	{"GBE",                 BIT(7)},
 171
 172	{"SATA",                BIT(0)},
 173	{"HDA_PGD0",            BIT(1)},
 174	{"HDA_PGD1",            BIT(2)},
 175	{"HDA_PGD2",            BIT(3)},
 176	{"HDA_PGD3",            BIT(4)},
 177	{"SPD",                 BIT(5)},
 178	{"LPSS",                BIT(6)},
 179	{"LPC",                 BIT(7)},
 180
 181	{"SMB",                 BIT(0)},
 182	{"ISH",                 BIT(1)},
 183	{"P2SB",                BIT(2)},
 184	{"NPK_VNN",             BIT(3)},
 185	{"SDX",                 BIT(4)},
 186	{"SPE",                 BIT(5)},
 187	{"Fuse",                BIT(6)},
 188	/* Reserved for Cannonlake but valid for Icelake */
 189	{"SBR8",		BIT(7)},
 190
 191	{"CSME_FSC",            BIT(0)},
 192	{"USB3_OTG",            BIT(1)},
 193	{"EXI",                 BIT(2)},
 194	{"CSE",                 BIT(3)},
 195	{"CSME_KVM",            BIT(4)},
 196	{"CSME_PMT",            BIT(5)},
 197	{"CSME_CLINK",          BIT(6)},
 198	{"CSME_PTIO",           BIT(7)},
 199
 200	{"CSME_USBR",           BIT(0)},
 201	{"CSME_SUSRAM",         BIT(1)},
 202	{"CSME_SMT1",           BIT(2)},
 203	{"CSME_SMT4",           BIT(3)},
 204	{"CSME_SMS2",           BIT(4)},
 205	{"CSME_SMS1",           BIT(5)},
 206	{"CSME_RTC",            BIT(6)},
 207	{"CSME_PSF",            BIT(7)},
 208
 209	{"SBR0",                BIT(0)},
 210	{"SBR1",                BIT(1)},
 211	{"SBR2",                BIT(2)},
 212	{"SBR3",                BIT(3)},
 213	{"SBR4",                BIT(4)},
 214	{"SBR5",                BIT(5)},
 215	{"CSME_PECI",           BIT(6)},
 216	{"PSF1",                BIT(7)},
 217
 218	{"PSF2",                BIT(0)},
 219	{"PSF3",                BIT(1)},
 220	{"PSF4",                BIT(2)},
 221	{"CNVI",                BIT(3)},
 222	{"UFS0",                BIT(4)},
 223	{"EMMC",                BIT(5)},
 224	{"SPF",			BIT(6)},
 225	{"SBR6",                BIT(7)},
 226
 227	{"SBR7",                BIT(0)},
 228	{"NPK_AON",             BIT(1)},
 229	{"HDA_PGD4",            BIT(2)},
 230	{"HDA_PGD5",            BIT(3)},
 231	{"HDA_PGD6",            BIT(4)},
 232	/* Reserved for Cannonlake but valid for Icelake */
 233	{"PSF6",		BIT(5)},
 234	{"PSF7",		BIT(6)},
 235	{"PSF8",		BIT(7)},
 236
 237	/* Icelake generation onwards only */
 238	{"RES_65",		BIT(0)},
 239	{"RES_66",		BIT(1)},
 240	{"RES_67",		BIT(2)},
 241	{"TAM",			BIT(3)},
 242	{"GBETSN",		BIT(4)},
 243	{"TBTLSX",		BIT(5)},
 244	{"RES_71",		BIT(6)},
 245	{"RES_72",		BIT(7)},
 246	{}
 247};
 248
 249static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
 250	{"AUDIO_D3",		BIT(0)},
 251	{"OTG_D3",		BIT(1)},
 252	{"XHCI_D3",		BIT(2)},
 253	{"LPIO_D3",		BIT(3)},
 254	{"SDX_D3",		BIT(4)},
 255	{"SATA_D3",		BIT(5)},
 256	{"UFS0_D3",		BIT(6)},
 257	{"UFS1_D3",		BIT(7)},
 258	{"EMMC_D3",		BIT(8)},
 259	{}
 260};
 261
 262static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
 263	{"SDIO_PLL_OFF",	BIT(0)},
 264	{"USB2_PLL_OFF",	BIT(1)},
 265	{"AUDIO_PLL_OFF",	BIT(2)},
 266	{"OC_PLL_OFF",		BIT(3)},
 267	{"MAIN_PLL_OFF",	BIT(4)},
 268	{"XOSC_OFF",		BIT(5)},
 269	{"LPC_CLKS_GATED",	BIT(6)},
 270	{"PCIE_CLKREQS_IDLE",	BIT(7)},
 271	{"AUDIO_ROSC_OFF",	BIT(8)},
 272	{"HPET_XOSC_CLK_REQ",	BIT(9)},
 273	{"PMC_ROSC_SLOW_CLK",	BIT(10)},
 274	{"AON2_ROSC_GATED",	BIT(11)},
 275	{"CLKACKS_DEASSERTED",	BIT(12)},
 276	{}
 277};
 278
 279static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
 280	{"MPHY_CORE_GATED",	BIT(0)},
 281	{"CSME_GATED",		BIT(1)},
 282	{"USB2_SUS_GATED",	BIT(2)},
 283	{"DYN_FLEX_IO_IDLE",	BIT(3)},
 284	{"GBE_NO_LINK",		BIT(4)},
 285	{"THERM_SEN_DISABLED",	BIT(5)},
 286	{"PCIE_LOW_POWER",	BIT(6)},
 287	{"ISH_VNNAON_REQ_ACT",	BIT(7)},
 288	{"ISH_VNN_REQ_ACT",	BIT(8)},
 289	{"CNV_VNNAON_REQ_ACT",	BIT(9)},
 290	{"CNV_VNN_REQ_ACT",	BIT(10)},
 291	{"NPK_VNNON_REQ_ACT",	BIT(11)},
 292	{"PMSYNC_STATE_IDLE",	BIT(12)},
 293	{"ALST_GT_THRES",	BIT(13)},
 294	{"PMC_ARC_PG_READY",	BIT(14)},
 295	{}
 296};
 297
 298static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
 299	cnp_slps0_dbg0_map,
 300	cnp_slps0_dbg1_map,
 301	cnp_slps0_dbg2_map,
 302	NULL,
 303};
 304
 305static const struct pmc_bit_map cnp_ltr_show_map[] = {
 306	{"SOUTHPORT_A",		CNP_PMC_LTR_SPA},
 307	{"SOUTHPORT_B",		CNP_PMC_LTR_SPB},
 308	{"SATA",		CNP_PMC_LTR_SATA},
 309	{"GIGABIT_ETHERNET",	CNP_PMC_LTR_GBE},
 310	{"XHCI",		CNP_PMC_LTR_XHCI},
 311	{"Reserved",		CNP_PMC_LTR_RESERVED},
 312	{"ME",			CNP_PMC_LTR_ME},
 313	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
 314	{"EVA",			CNP_PMC_LTR_EVA},
 315	{"SOUTHPORT_C",		CNP_PMC_LTR_SPC},
 316	{"HD_AUDIO",		CNP_PMC_LTR_AZ},
 317	{"CNV",			CNP_PMC_LTR_CNV},
 318	{"LPSS",		CNP_PMC_LTR_LPSS},
 319	{"SOUTHPORT_D",		CNP_PMC_LTR_SPD},
 320	{"SOUTHPORT_E",		CNP_PMC_LTR_SPE},
 321	{"CAMERA",		CNP_PMC_LTR_CAM},
 322	{"ESPI",		CNP_PMC_LTR_ESPI},
 323	{"SCC",			CNP_PMC_LTR_SCC},
 324	{"ISH",			CNP_PMC_LTR_ISH},
 325	{"UFSX2",		CNP_PMC_LTR_UFSX2},
 326	{"EMMC",		CNP_PMC_LTR_EMMC},
 327	/* Reserved for Cannonlake but valid for Icelake */
 328	{"WIGIG",		ICL_PMC_LTR_WIGIG},
 329	/* Below two cannot be used for LTR_IGNORE */
 330	{"CURRENT_PLATFORM",	CNP_PMC_LTR_CUR_PLT},
 331	{"AGGREGATED_SYSTEM",	CNP_PMC_LTR_CUR_ASLT},
 332	{}
 333};
 334
 335static const struct pmc_reg_map cnp_reg_map = {
 336	.pfear_sts = cnp_pfear_map,
 337	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
 338	.slps0_dbg_maps = cnp_slps0_dbg_maps,
 339	.ltr_show_sts = cnp_ltr_show_map,
 340	.msr_sts = msr_map,
 341	.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
 342	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
 343	.regmap_length = CNP_PMC_MMIO_REG_LEN,
 344	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
 345	.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
 346	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
 347	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
 348	.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
 349};
 350
 351static const struct pmc_reg_map icl_reg_map = {
 352	.pfear_sts = cnp_pfear_map,
 353	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
 354	.slps0_dbg_maps = cnp_slps0_dbg_maps,
 355	.ltr_show_sts = cnp_ltr_show_map,
 356	.msr_sts = msr_map,
 357	.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
 358	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
 359	.regmap_length = CNP_PMC_MMIO_REG_LEN,
 360	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
 361	.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
 362	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
 363	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
 364	.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
 365};
 366
 367static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
 368{
 369	return readb(pmcdev->regbase + offset);
 370}
 371
 372static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
 373{
 374	return readl(pmcdev->regbase + reg_offset);
 375}
 376
  377static inline void pmc_core_reg_write(struct pmc_dev *pmcdev,
  378				       int reg_offset, u32 val)
 379{
 380	writel(val, pmcdev->regbase + reg_offset);
 381}
 382
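     /*
      * Note (annotation): the SLP_S0 residency counter advances in fixed
      * hardware steps; multiplying by SPT_PMC_SLP_S0_RES_COUNTER_STEP
      * (defined in intel_pmc_core.h) converts the raw count into the
      * microsecond value exposed through the slp_s0_residency_usec
      * debugfs node registered below.
      */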
 383static inline u64 pmc_core_adjust_slp_s0_step(u32 value)
 384{
 385	return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
 386}
 387
 388static int pmc_core_dev_state_get(void *data, u64 *val)
 389{
 390	struct pmc_dev *pmcdev = data;
 391	const struct pmc_reg_map *map = pmcdev->map;
 392	u32 value;
 393
 394	value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
 395	*val = pmc_core_adjust_slp_s0_step(value);
 396
 397	return 0;
 398}
 399
 400DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
 401
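     /*
      * Note (annotation): BIOS can set a read-disable bit in the PM_CFG
      * register to block host reads of PMC XRAM; when it is set, the MPHY
      * and PLL debugfs views below bail out with an "Access denied" message.
      */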
 402static int pmc_core_check_read_lock_bit(void)
 403{
 404	struct pmc_dev *pmcdev = &pmc;
 405	u32 value;
 406
 407	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
 408	return value & BIT(pmcdev->map->pm_read_disable_bit);
 409}
 410
 411#if IS_ENABLED(CONFIG_DEBUG_FS)
 412static bool slps0_dbg_latch;
 413
 414static void pmc_core_display_map(struct seq_file *s, int index,
 415				 u8 pf_reg, const struct pmc_bit_map *pf_map)
 416{
 417	seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
 418		   index, pf_map[index].name,
 419		   pf_map[index].bit_mask & pf_reg ? "Off" : "On");
 420}
 421
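     /*
      * Note (annotation): each PPFEAR status byte reports eight IPs, so map
      * entry N corresponds to bit (N % 8) of byte (N / 8) read starting at
      * ppfear0_offset.
      */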
 422static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
 423{
 424	struct pmc_dev *pmcdev = s->private;
 425	const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
 426	u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
 427	int index, iter;
 428
 429	iter = pmcdev->map->ppfear0_offset;
 430
 431	for (index = 0; index < pmcdev->map->ppfear_buckets &&
 432	     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
 433		pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
 434
 435	for (index = 0; map[index].name &&
 436	     index < pmcdev->map->ppfear_buckets * 8; index++)
 437		pmc_core_display_map(s, index, pf_regs[index / 8], map);
 438
 439	return 0;
 440}
 441DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
 442
 443/* This function should return link status, 0 means ready */
 444static int pmc_core_mtpmc_link_status(void)
 445{
 446	struct pmc_dev *pmcdev = &pmc;
 447	u32 value;
 448
 449	value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
 450	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
 451}
 452
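     /*
      * Note (annotation): MTPMC/MFPMC form a simple mailbox: once the link
      * is ready, the caller writes a target XRAM address into MTPMC and,
      * after a short delay, reads the PMC's reply from MFPMC.
      */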
 453static int pmc_core_send_msg(u32 *addr_xram)
 454{
 455	struct pmc_dev *pmcdev = &pmc;
 456	u32 dest;
 457	int timeout;
 458
 459	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
 460		if (pmc_core_mtpmc_link_status() == 0)
 461			break;
 462		msleep(5);
 463	}
 464
 465	if (timeout <= 0 && pmc_core_mtpmc_link_status())
 466		return -EBUSY;
 467
 468	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
 469	pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
 470	return 0;
 471}
 472
 473static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
 474{
 475	struct pmc_dev *pmcdev = s->private;
 476	const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
 477	u32 mphy_core_reg_low, mphy_core_reg_high;
 478	u32 val_low, val_high;
 479	int index, err = 0;
 480
 481	if (pmcdev->pmc_xram_read_bit) {
 482		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
 483		return 0;
 484	}
 485
 486	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
 487	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
 488
 489	mutex_lock(&pmcdev->lock);
 490
 491	if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
 492		err = -EBUSY;
 493		goto out_unlock;
 494	}
 495
 496	msleep(10);
 497	val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
 498
 499	if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
 500		err = -EBUSY;
 501		goto out_unlock;
 502	}
 503
 504	msleep(10);
 505	val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
 506
 507	for (index = 0; map[index].name && index < 8; index++) {
 508		seq_printf(s, "%-32s\tState: %s\n",
 509			   map[index].name,
 510			   map[index].bit_mask & val_low ? "Not power gated" :
 511			   "Power gated");
 512	}
 513
 514	for (index = 8; map[index].name; index++) {
 515		seq_printf(s, "%-32s\tState: %s\n",
 516			   map[index].name,
 517			   map[index].bit_mask & val_high ? "Not power gated" :
 518			   "Power gated");
 519	}
 520
 521out_unlock:
 522	mutex_unlock(&pmcdev->lock);
 523	return err;
 524}
 525DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
 526
 527static int pmc_core_pll_show(struct seq_file *s, void *unused)
 528{
 529	struct pmc_dev *pmcdev = s->private;
 530	const struct pmc_bit_map *map = pmcdev->map->pll_sts;
 531	u32 mphy_common_reg, val;
 532	int index, err = 0;
 533
 534	if (pmcdev->pmc_xram_read_bit) {
 535		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
 536		return 0;
 537	}
 538
 539	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
 540	mutex_lock(&pmcdev->lock);
 541
 542	if (pmc_core_send_msg(&mphy_common_reg) != 0) {
 543		err = -EBUSY;
 544		goto out_unlock;
 545	}
 546
 547	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
 548	msleep(10);
 549	val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
 550
 551	for (index = 0; map[index].name ; index++) {
 552		seq_printf(s, "%-32s\tState: %s\n",
 553			   map[index].name,
 554			   map[index].bit_mask & val ? "Active" : "Idle");
 555	}
 556
 557out_unlock:
 558	mutex_unlock(&pmcdev->lock);
 559	return err;
 560}
 561DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
 562
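     /*
      * Note (annotation): writing an IP index (same ordering as the
      * ltr_show_sts map) to the ltr_ignore debugfs file sets the matching
      * bit in the LTR_IGNORE register, so the PMC disregards that IP's
      * latency tolerance reports.
      */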
  563static ssize_t pmc_core_ltr_ignore_write(struct file *file,
  564					  const char __user *userbuf, size_t count, loff_t *ppos)
 565{
 566	struct pmc_dev *pmcdev = &pmc;
 567	const struct pmc_reg_map *map = pmcdev->map;
 568	u32 val, buf_size, fd;
 569	int err = 0;
 570
 571	buf_size = count < 64 ? count : 64;
 572	mutex_lock(&pmcdev->lock);
 573
 574	if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
 575		err = -EFAULT;
 576		goto out_unlock;
 577	}
 578
 579	if (val > map->ltr_ignore_max) {
 580		err = -EINVAL;
 581		goto out_unlock;
 582	}
 583
 584	fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
 585	fd |= (1U << val);
 586	pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
 587
 588out_unlock:
 589	mutex_unlock(&pmcdev->lock);
 590	return err == 0 ? count : err;
 591}
 592
 593static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
 594{
 595	return 0;
 596}
 597
 598static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
 599{
 600	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
 601}
 602
 603static const struct file_operations pmc_core_ltr_ignore_ops = {
 604	.open           = pmc_core_ltr_ignore_open,
 605	.read           = seq_read,
 606	.write          = pmc_core_ltr_ignore_write,
 607	.llseek         = seq_lseek,
 608	.release        = single_release,
 609};
 610
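     /*
      * Note (annotation): sets (reset == false) or clears (reset == true)
      * the SLP_S0 event latch bit. Latching is only armed when the
      * slp_s0_dbg_latch debugfs bool has been written, and the bool is
      * consumed once the register has been updated.
      */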
 611static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
 612{
 613	const struct pmc_reg_map *map = pmcdev->map;
 614	u32 fd;
 615
 616	mutex_lock(&pmcdev->lock);
 617
 618	if (!reset && !slps0_dbg_latch)
 619		goto out_unlock;
 620
 621	fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
 622	if (reset)
 623		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
 624	else
 625		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
 626	pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);
 627
 628	slps0_dbg_latch = 0;
 629
 630out_unlock:
 631	mutex_unlock(&pmcdev->lock);
 632}
 633
 634static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
 635{
 636	struct pmc_dev *pmcdev = s->private;
 637	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
 638	const struct pmc_bit_map *map;
 639	int offset;
 640	u32 data;
 641
 642	pmc_core_slps0_dbg_latch(pmcdev, false);
 643	offset = pmcdev->map->slps0_dbg_offset;
 644	while (*maps) {
 645		map = *maps;
 646		data = pmc_core_reg_read(pmcdev, offset);
 647		offset += 4;
 648		while (map->name) {
 649			seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
 650				   map->name,
 651				   data & map->bit_mask ?
 652				   "Yes" : "No");
 653			++map;
 654		}
 655		++maps;
 656	}
 657	pmc_core_slps0_dbg_latch(pmcdev, true);
 658	return 0;
 659}
 660DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
 661
 662static u32 convert_ltr_scale(u32 val)
 663{
 664	/*
 665	 * As per PCIE specification supporting document
 666	 * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
  667	 * Tolerance Reporting data payload is encoded in
  668	 * 3-bit scale and 10-bit value fields. Values are
 669	 * multiplied by the indicated scale to yield an absolute time
 670	 * value, expressible in a range from 1 nanosecond to
 671	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
 672	 *
 673	 * scale encoding is as follows:
 674	 *
 675	 * ----------------------------------------------
 676	 * |scale factor	|	Multiplier (ns)	|
 677	 * ----------------------------------------------
 678	 * |	0		|	1		|
 679	 * |	1		|	32		|
 680	 * |	2		|	1024		|
 681	 * |	3		|	32768		|
 682	 * |	4		|	1048576		|
 683	 * |	5		|	33554432	|
 684	 * |	6		|	Invalid		|
 685	 * |	7		|	Invalid		|
 686	 * ----------------------------------------------
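     	 * Worked example: scale = 2, value = 100 decodes to
     	 * 100 * (1 << (5 * 2)) = 100 * 1024 = 102,400 ns.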
 687	 */
 688	if (val > 5) {
 689		pr_warn("Invalid LTR scale factor.\n");
 690		return 0;
 691	}
 692
 693	return 1U << (5 * val);
 694}
 695
 696static int pmc_core_ltr_show(struct seq_file *s, void *unused)
 697{
 698	struct pmc_dev *pmcdev = s->private;
 699	const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
 700	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
 701	u32 ltr_raw_data, scale, val;
 702	u16 snoop_ltr, nonsnoop_ltr;
 703	int index;
 704
 705	for (index = 0; map[index].name ; index++) {
 706		decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
 707		ltr_raw_data = pmc_core_reg_read(pmcdev,
 708						 map[index].bit_mask);
 709		snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
 710		nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
 711
 712		if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
 713			scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
 714			val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
 715			decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
 716		}
 717
 718		if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
 719			scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
 720			val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
 721			decoded_snoop_ltr = val * convert_ltr_scale(scale);
 722		}
 723
 724		seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
 725			   map[index].name, ltr_raw_data,
 726			   decoded_non_snoop_ltr,
 727			   decoded_snoop_ltr);
 728	}
 729	return 0;
 730}
 731DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
 732
 733static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
 734{
 735	struct pmc_dev *pmcdev = s->private;
 736	const struct pmc_bit_map *map = pmcdev->map->msr_sts;
 737	u64 pcstate_count;
 738	int index;
 739
 740	for (index = 0; map[index].name ; index++) {
 741		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
 742			continue;
 743
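		/*
		 * Note (annotation): these residency MSRs tick at TSC
		 * frequency on Core parts (per the Intel SDM), so
		 * count * 1000 / tsc_khz yields the residency in
		 * microseconds.
		 */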
 744		pcstate_count *= 1000;
 745		do_div(pcstate_count, tsc_khz);
 746		seq_printf(s, "%-8s : %llu\n", map[index].name,
 747			   pcstate_count);
 748	}
 749
 750	return 0;
 751}
 752DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
 753
 754static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
 755{
 756	debugfs_remove_recursive(pmcdev->dbgfs_dir);
 757}
 758
 759static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
 760{
 761	struct dentry *dir;
 762
 763	dir = debugfs_create_dir("pmc_core", NULL);
 764	pmcdev->dbgfs_dir = dir;
 765
 766	debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
 767			    &pmc_core_dev_state);
 768
 769	debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
 770			    &pmc_core_ppfear_fops);
 771
 772	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
 773			    &pmc_core_ltr_ignore_ops);
 774
 775	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
 776
 777	debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
 778			    &pmc_core_pkgc_fops);
 779
 780	if (pmcdev->map->pll_sts)
 781		debugfs_create_file("pll_status", 0444, dir, pmcdev,
 782				    &pmc_core_pll_fops);
 783
 784	if (pmcdev->map->mphy_sts)
 785		debugfs_create_file("mphy_core_lanes_power_gating_status",
 786				    0444, dir, pmcdev,
 787				    &pmc_core_mphy_pg_fops);
 788
 789	if (pmcdev->map->slps0_dbg_maps) {
 790		debugfs_create_file("slp_s0_debug_status", 0444,
 791				    dir, pmcdev,
 792				    &pmc_core_slps0_dbg_fops);
 793
 794		debugfs_create_bool("slp_s0_dbg_latch", 0644,
 795				    dir, &slps0_dbg_latch);
 796	}
 797}
 798#else
 799static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
 800{
 801}
 802
 803static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
 804{
 805}
 806#endif /* CONFIG_DEBUG_FS */
 807
 808static const struct x86_cpu_id intel_pmc_core_ids[] = {
 809	INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map),
 810	INTEL_CPU_FAM6(SKYLAKE, spt_reg_map),
 811	INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map),
 812	INTEL_CPU_FAM6(KABYLAKE, spt_reg_map),
 813	INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
 814	INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
 815	INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
 816	{}
 817};
 818
 819MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
 820
 821static const struct pci_device_id pmc_pci_ids[] = {
 822	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
 823	{ 0, },
 824};
 825
 826/*
  827 * This quirk is used on platforms where the BIOS forces the
  828 * 24MHz crystal to shut down before the PMC can assert
  829 * SLP_S0#.
 830 */
 831static int quirk_xtal_ignore(const struct dmi_system_id *id)
 832{
 833	struct pmc_dev *pmcdev = &pmc;
 834	u32 value;
 835
 836	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
 837	/* 24MHz Crystal Shutdown Qualification Disable */
 838	value |= SPT_PMC_VRIC1_XTALSDQDIS;
 839	/* Low Voltage Mode Enable */
 840	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
 841	pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
 842	return 0;
 843}
 844
 845static const struct dmi_system_id pmc_core_dmi_table[]  = {
 846	{
 847	.callback = quirk_xtal_ignore,
 848	.ident = "HP Elite x2 1013 G3",
 849	.matches = {
 850		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 851		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
 852		},
 853	},
 854	{}
 855};
 856
 857static int pmc_core_probe(struct platform_device *pdev)
 858{
 859	static bool device_initialized;
 860	struct pmc_dev *pmcdev = &pmc;
 861	const struct x86_cpu_id *cpu_id;
 862	u64 slp_s0_addr;
 863
 864	if (device_initialized)
 865		return -ENODEV;
 866
 867	cpu_id = x86_match_cpu(intel_pmc_core_ids);
 868	if (!cpu_id)
 869		return -ENODEV;
 870
 871	pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
 872
 873	/*
 874	 * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
 875	 * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
 876	 * in this case.
 877	 */
 878	if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
 879		pmcdev->map = &cnp_reg_map;
 880
 881	if (lpit_read_residency_count_address(&slp_s0_addr)) {
 882		pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
 883
 884		if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
 885			return -ENODEV;
 886	} else {
 887		pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
 888	}
 889
 890	pmcdev->regbase = ioremap(pmcdev->base_addr,
 891				  pmcdev->map->regmap_length);
 892	if (!pmcdev->regbase)
 893		return -ENOMEM;
 894
 895	mutex_init(&pmcdev->lock);
 896	platform_set_drvdata(pdev, pmcdev);
 897	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
 898	dmi_check_system(pmc_core_dmi_table);
 899
 900	pmc_core_dbgfs_register(pmcdev);
 901
 902	device_initialized = true;
 903	dev_info(&pdev->dev, " initialized\n");
 904
 905	return 0;
 906}
 907
 908static int pmc_core_remove(struct platform_device *pdev)
 909{
 910	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
 911
 912	pmc_core_dbgfs_unregister(pmcdev);
 913	platform_set_drvdata(pdev, NULL);
 914	mutex_destroy(&pmcdev->lock);
 915	iounmap(pmcdev->regbase);
 916	return 0;
 917}
 918
 919#ifdef CONFIG_PM_SLEEP
 920
 921static bool warn_on_s0ix_failures;
 922module_param(warn_on_s0ix_failures, bool, 0644);
 923MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
 924
 925static int pmc_core_suspend(struct device *dev)
 926{
 927	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
 928
 929	pmcdev->check_counters = false;
 930
 931	/* No warnings on S0ix failures */
 932	if (!warn_on_s0ix_failures)
 933		return 0;
 934
  935	/* Check if the suspend will actually use S0ix */
 936	if (pm_suspend_via_firmware())
 937		return 0;
 938
 939	/* Save PC10 residency for checking later */
 940	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
 941		return -EIO;
 942
 943	/* Save S0ix residency for checking later */
 944	if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
 945		return -EIO;
 946
 947	pmcdev->check_counters = true;
 948	return 0;
 949}
 950
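     /*
      * Note (annotation): if the PC10 residency MSR did not advance across
      * the suspend, the package never reached C10, which rules out
      * SLP_S0/S0ix entry; the resume handler uses this to tell the two
      * failure modes apart.
      */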
 951static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
 952{
 953	u64 pc10_counter;
 954
 955	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
 956		return false;
 957
 958	if (pc10_counter == pmcdev->pc10_counter)
 959		return true;
 960
 961	return false;
 962}
 963
 964static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
 965{
 966	u64 s0ix_counter;
 967
 968	if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
 969		return false;
 970
 971	if (s0ix_counter == pmcdev->s0ix_counter)
 972		return true;
 973
 974	return false;
 975}
 976
 977static int pmc_core_resume(struct device *dev)
 978{
 979	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
 980	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
 981	int offset = pmcdev->map->slps0_dbg_offset;
 982	const struct pmc_bit_map *map;
 983	u32 data;
 984
 985	if (!pmcdev->check_counters)
 986		return 0;
 987
 988	if (!pmc_core_is_s0ix_failed(pmcdev))
 989		return 0;
 990
 991	if (pmc_core_is_pc10_failed(pmcdev)) {
 992		/* S0ix failed because of PC10 entry failure */
 993		dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
 994			 pmcdev->pc10_counter);
 995		return 0;
 996	}
 997
  998	/* The really interesting case: S0ix failed - let's ask the PMC why. */
 999	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
1000		 pmcdev->s0ix_counter);
1001	while (*maps) {
1002		map = *maps;
1003		data = pmc_core_reg_read(pmcdev, offset);
1004		offset += 4;
1005		while (map->name) {
1006			dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
1007				map->name,
1008				data & map->bit_mask ? "Yes" : "No");
1009			map++;
1010		}
1011		maps++;
1012	}
1013	return 0;
1014}
1015
1016#endif
1017
1018static const struct dev_pm_ops pmc_core_pm_ops = {
1019	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
1020};
1021
1022static const struct acpi_device_id pmc_core_acpi_ids[] = {
 1023	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
1024	{ }
1025};
1026MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
1027
1028static struct platform_driver pmc_core_driver = {
1029	.driver = {
1030		.name = "intel_pmc_core",
1031		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
1032		.pm = &pmc_core_pm_ops,
1033	},
1034	.probe = pmc_core_probe,
1035	.remove = pmc_core_remove,
1036};
1037
1038module_platform_driver(pmc_core_driver);
1039
1040MODULE_LICENSE("GPL v2");
1041MODULE_DESCRIPTION("Intel PMC Core Driver");