Linux Audio

v6.2
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2//
   3// This file is provided under a dual BSD/GPLv2 license.  When using or
   4// redistributing this file, you may do so under either license.
   5//
   6// Copyright(c) 2018 Intel Corporation. All rights reserved.
   7//
   8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
   9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
  10//	    Rander Wang <rander.wang@intel.com>
  11//          Keyon Jie <yang.jie@linux.intel.com>
  12//
  13
  14/*
  15 * Hardware interface for generic Intel audio DSP HDA IP
  16 */
  17
  18#include <linux/module.h>
  19#include <sound/hdaudio_ext.h>
  20#include <sound/hda_register.h>
  21#include <trace/events/sof_intel.h>
  22#include "../sof-audio.h"
  23#include "../ops.h"
  24#include "hda.h"
  25#include "hda-ipc.h"
  26
  27static bool hda_enable_trace_D0I3_S0;
  28#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
  29module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
  30MODULE_PARM_DESC(enable_trace_D0I3_S0,
  31		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
  32#endif
  33
  34/*
  35 * DSP Core control.
  36 */
  37
  38static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
  39{
  40	u32 adspcs;
  41	u32 reset;
  42	int ret;
  43
  44	/* set reset bits for cores */
  45	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  46	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  47					 HDA_DSP_REG_ADSPCS,
  48					 reset, reset);
  49
  50	/* poll with timeout to check if operation successful */
  51	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  52					HDA_DSP_REG_ADSPCS, adspcs,
  53					((adspcs & reset) == reset),
  54					HDA_DSP_REG_POLL_INTERVAL_US,
  55					HDA_DSP_RESET_TIMEOUT_US);
  56	if (ret < 0) {
  57		dev_err(sdev->dev,
  58			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
  59			__func__);
  60		return ret;
  61	}
  62
  63	/* has core entered reset ? */
  64	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
  65				  HDA_DSP_REG_ADSPCS);
  66	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
  67		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
  68		dev_err(sdev->dev,
  69			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
  70			core_mask, adspcs);
  71		ret = -EIO;
  72	}
  73
  74	return ret;
  75}
  76
  77static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
  78{
  79	unsigned int crst;
  80	u32 adspcs;
  81	int ret;
  82
  83	/* clear reset bits for cores */
  84	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  85					 HDA_DSP_REG_ADSPCS,
  86					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
  87					 0);
  88
  89	/* poll with timeout to check if operation successful */
  90	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  91	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  92					    HDA_DSP_REG_ADSPCS, adspcs,
  93					    !(adspcs & crst),
  94					    HDA_DSP_REG_POLL_INTERVAL_US,
  95					    HDA_DSP_RESET_TIMEOUT_US);
  96
  97	if (ret < 0) {
  98		dev_err(sdev->dev,
  99			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 100			__func__);
 101		return ret;
 102	}
 103
 104	/* has core left reset ? */
 105	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 106				  HDA_DSP_REG_ADSPCS);
 107	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
 108		dev_err(sdev->dev,
 109			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
 110			core_mask, adspcs);
 111		ret = -EIO;
 112	}
 113
 114	return ret;
 115}
 116
 117int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
 118{
 119	/* stall core */
 120	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 121					 HDA_DSP_REG_ADSPCS,
 122					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 123					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 124
 125	/* set reset state */
 126	return hda_dsp_core_reset_enter(sdev, core_mask);
 127}
 128
 129bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
 130{
 131	int val;
 132	bool is_enable;
 133
 134	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 135
 136#define MASK_IS_EQUAL(v, m, field) ({	\
 137	u32 _m = field(m);		\
 138	((v) & _m) == _m;		\
 139})
 140
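	/*
	 * A core counts as enabled only when both its SPA and CPA bits are set
	 * and it is neither held in reset (CRST) nor stalled (CSTALL).
	 */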
 141	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
 142		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
 143		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
 144		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 145
 146#undef MASK_IS_EQUAL
 147
 148	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
 149		is_enable, core_mask);
 150
 151	return is_enable;
 152}
 153
 154int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
 155{
 156	int ret;
 157
 158	/* leave reset state */
 159	ret = hda_dsp_core_reset_leave(sdev, core_mask);
 160	if (ret < 0)
 161		return ret;
 162
 163	/* run core */
 164	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
 165	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 166					 HDA_DSP_REG_ADSPCS,
 167					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 168					 0);
 169
 170	/* is core now running ? */
 171	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
 172		hda_dsp_core_stall_reset(sdev, core_mask);
 173		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
 174			core_mask);
 175		ret = -EIO;
 176	}
 177
 178	return ret;
 179}
 180
 181/*
 182 * Power Management.
 183 */
 184
 185int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
 186{
 187	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 188	const struct sof_intel_dsp_desc *chip = hda->desc;
 189	unsigned int cpa;
 190	u32 adspcs;
 191	int ret;
 192
 193	/* restrict core_mask to host managed cores mask */
 194	core_mask &= chip->host_managed_cores_mask;
 195	/* return if core_mask is not valid */
 196	if (!core_mask)
 197		return 0;
 198
 199	/* update bits */
 200	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
 201				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
 202				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
 203
 204	/* poll with timeout to check if operation successful */
 205	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
 206	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 207					    HDA_DSP_REG_ADSPCS, adspcs,
 208					    (adspcs & cpa) == cpa,
 209					    HDA_DSP_REG_POLL_INTERVAL_US,
 210					    HDA_DSP_RESET_TIMEOUT_US);
 211	if (ret < 0) {
 212		dev_err(sdev->dev,
 213			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 214			__func__);
 215		return ret;
 216	}
 217
 218	/* did core power up ? */
 219	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 220				  HDA_DSP_REG_ADSPCS);
 221	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
 222		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
 223		dev_err(sdev->dev,
 224			"error: power up core failed core_mask %x adspcs 0x%x\n",
 225			core_mask, adspcs);
 226		ret = -EIO;
 227	}
 228
 229	return ret;
 230}
 231
 232static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
 233{
 234	u32 adspcs;
 235	int ret;
 236
 237	/* update bits */
 238	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 239					 HDA_DSP_REG_ADSPCS,
 240					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
 241
 242	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 243				HDA_DSP_REG_ADSPCS, adspcs,
 244				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
 245				HDA_DSP_REG_POLL_INTERVAL_US,
 246				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
 247	if (ret < 0)
 248		dev_err(sdev->dev,
 249			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 250			__func__);
 251
 252	return ret;
 253}
 254
 255int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
 256{
 257	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 258	const struct sof_intel_dsp_desc *chip = hda->desc;
 259	int ret;
 260
 261	/* restrict core_mask to host managed cores mask */
 262	core_mask &= chip->host_managed_cores_mask;
 263
 264	/* return if core_mask is not valid or cores are already enabled */
 265	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
 266		return 0;
 267
 268	/* power up */
 269	ret = hda_dsp_core_power_up(sdev, core_mask);
 270	if (ret < 0) {
 271		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
 272			core_mask);
 273		return ret;
 274	}
 275
 276	return hda_dsp_core_run(sdev, core_mask);
 277}
 278
 279int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
 280				  unsigned int core_mask)
 281{
 282	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 283	const struct sof_intel_dsp_desc *chip = hda->desc;
 284	int ret;
 285
 286	/* restrict core_mask to host managed cores mask */
 287	core_mask &= chip->host_managed_cores_mask;
 288
 289	/* return if core_mask is not valid */
 290	if (!core_mask)
 291		return 0;
 292
 293	/* place core in reset prior to power down */
 294	ret = hda_dsp_core_stall_reset(sdev, core_mask);
 295	if (ret < 0) {
 296		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
 297			core_mask);
 298		return ret;
 299	}
 300
 301	/* power down core */
 302	ret = hda_dsp_core_power_down(sdev, core_mask);
 303	if (ret < 0) {
 304		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
 305			core_mask, ret);
 306		return ret;
 307	}
 308
 309	/* make sure we are in OFF state */
 310	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
 311		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
 312			core_mask, ret);
 313		ret = -EIO;
 314	}
 315
 316	return ret;
 317}
 318
 319void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
 320{
 321	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 322	const struct sof_intel_dsp_desc *chip = hda->desc;
 323
 324	/* enable IPC DONE and BUSY interrupts */
 325	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 326			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
 327			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
 328
 329	/* enable IPC interrupt */
 330	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 331				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
 332}
 333
 334void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
 335{
 336	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 337	const struct sof_intel_dsp_desc *chip = hda->desc;
 338
 339	/* disable IPC interrupt */
 340	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 341				HDA_DSP_ADSPIC_IPC, 0);
 342
 343	/* disable IPC BUSY and DONE interrupt */
 344	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 345			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
 346}
 347
 348static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
 349{
 350	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
 351	struct snd_sof_pdata *pdata = sdev->pdata;
 352	const struct sof_intel_dsp_desc *chip;
 353
 354	chip = get_chip_info(pdata);
 355	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
 356		SOF_HDA_VS_D0I3C_CIP) {
 357		if (!retry--)
 358			return -ETIMEDOUT;
 359		usleep_range(10, 15);
 360	}
 361
 362	return 0;
 363}
 364
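/*
 * Notify the firmware of a power gating policy change by sending the
 * PM_GATE IPC with the requested flags.
 */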
 365static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
 366{
 367	struct sof_ipc_pm_gate pm_gate;
 368	struct sof_ipc_reply reply;
 369
 370	memset(&pm_gate, 0, sizeof(pm_gate));
 371
 372	/* configure pm_gate ipc message */
 373	pm_gate.hdr.size = sizeof(pm_gate);
 374	pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
 375	pm_gate.flags = flags;
 376
 377	/* send pm_gate ipc to dsp */
 378	return sof_ipc_tx_message_no_pm(sdev->ipc, &pm_gate, sizeof(pm_gate),
 379					&reply, sizeof(reply));
 380}
 381
 382static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
 383{
 384	struct snd_sof_pdata *pdata = sdev->pdata;
 385	const struct sof_intel_dsp_desc *chip;
 386	int ret;
 387	u8 reg;
 388
 389	chip = get_chip_info(pdata);
 390
 391	/* Write to D0I3C after Command-In-Progress bit is cleared */
 392	ret = hda_dsp_wait_d0i3c_done(sdev);
 393	if (ret < 0) {
 394		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
 395		return ret;
 396	}
 397
 398	/* Update D0I3C register */
 399	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
 400			    SOF_HDA_VS_D0I3C_I3, value);
 401
 402	/* Wait for cmd in progress to be cleared before exiting the function */
 403	ret = hda_dsp_wait_d0i3c_done(sdev);
 404	if (ret < 0) {
 405		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
 406		return ret;
 407	}
 408
 409	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
 410	trace_sof_intel_D0I3C_updated(sdev, reg);
 411
 412	return 0;
 413}
 414
 415static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
 416				const struct sof_dsp_power_state *target_state)
 417{
 418	u32 flags = 0;
 419	int ret;
 420	u8 value = 0;
 421
 422	/*
 423	 * Sanity check for illegal state transitions
 424	 * The only allowed transitions are:
 425	 * 1. D3 -> D0I0
 426	 * 2. D0I0 -> D0I3
 427	 * 3. D0I3 -> D0I0
 428	 */
 429	switch (sdev->dsp_power_state.state) {
 430	case SOF_DSP_PM_D0:
 431		/* Follow the sequence below for D0 substate transitions */
 432		break;
 433	case SOF_DSP_PM_D3:
 434		/* Follow regular flow for D3 -> D0 transition */
 435		return 0;
 436	default:
 437		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
 438			sdev->dsp_power_state.state, target_state->state);
 439		return -EINVAL;
 440	}
 441
 442	/* Set flags and register value for D0 target substate */
 443	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
 444		value = SOF_HDA_VS_D0I3C_I3;
 445
 446		/*
 447		 * Trace DMA needs to be disabled when the DSP enters
 448		 * D0I3 for S0Ix suspend, but it can be kept enabled
 449		 * when the DSP enters D0I3 while the system is in S0
 450		 * for debug purposes.
 451		 */
 452		if (!sdev->fw_trace_is_supported ||
 453		    !hda_enable_trace_D0I3_S0 ||
 454		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
 455			flags = HDA_PM_NO_DMA_TRACE;
 456	} else {
 457		/* prevent power gating in D0I0 */
 458		flags = HDA_PM_PPG;
 459	}
 460
 461	/* update D0I3C register */
 462	ret = hda_dsp_update_d0i3c_register(sdev, value);
 463	if (ret < 0)
 464		return ret;
 465
 466	/*
 467	 * Notify the DSP of the state change.
 468	 * If this IPC fails, revert the D0I3C register update in order
 469	 * to prevent partial state change.
 470	 */
 471	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
 472	if (ret < 0) {
 473		dev_err(sdev->dev,
 474			"error: PM_GATE ipc error %d\n", ret);
 475		goto revert;
 476	}
 477
 478	return ret;
 479
 480revert:
 481	/* fallback to the previous register value */
 482	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
 483
 484	/*
 485	 * This can fail but return the IPC error to signal that
 486	 * the state change failed.
 487	 */
 488	hda_dsp_update_d0i3c_register(sdev, value);
 489
 490	return ret;
 491}
 492
 493/* helper to log DSP state */
 494static void hda_dsp_state_log(struct snd_sof_dev *sdev)
 495{
 496	switch (sdev->dsp_power_state.state) {
 497	case SOF_DSP_PM_D0:
 498		switch (sdev->dsp_power_state.substate) {
 499		case SOF_HDA_DSP_PM_D0I0:
 500			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
 501			break;
 502		case SOF_HDA_DSP_PM_D0I3:
 503			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
 504			break;
 505		default:
 506			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
 507				sdev->dsp_power_state.substate);
 508			break;
 509		}
 510		break;
 511	case SOF_DSP_PM_D1:
 512		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
 513		break;
 514	case SOF_DSP_PM_D2:
 515		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
 516		break;
 517	case SOF_DSP_PM_D3:
 518		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
 519		break;
 520	default:
 521		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
 522			sdev->dsp_power_state.state);
 523		break;
 524	}
 525}
 526
 527/*
 528 * All DSP power state transitions are initiated by the driver.
 529 * If the requested state change fails, the error is simply returned.
 530 * Further state transitions are attempted only when the set_power_save() op
 531 * is called again either because of a new IPC sent to the DSP or
 532 * during system suspend/resume.
 533 */
 534int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
 535			    const struct sof_dsp_power_state *target_state)
 536{
 537	int ret = 0;
 538
 539	/*
 540	 * When the DSP is already in D0I3 and the target state is D0I3,
 541	 * it could be the case that the DSP is in D0I3 during S0
 542	 * and the system is suspending to S0Ix. Therefore,
 543	 * hda_dsp_set_D0_state() must be called to disable trace DMA
 544	 * by sending the PM_GATE IPC to the FW.
 545	 */
 546	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
 547	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
 548		goto set_state;
 549
 550	/*
 551	 * For all other cases, return without doing anything if
 552	 * the DSP is already in the target state.
 553	 */
 554	if (target_state->state == sdev->dsp_power_state.state &&
 555	    target_state->substate == sdev->dsp_power_state.substate)
 556		return 0;
 557
 558set_state:
 559	switch (target_state->state) {
 560	case SOF_DSP_PM_D0:
 561		ret = hda_dsp_set_D0_state(sdev, target_state);
 562		break;
 563	case SOF_DSP_PM_D3:
 564		/* The only allowed transition is: D0I0 -> D3 */
 565		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
 566		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
 567			break;
 568
 569		dev_err(sdev->dev,
 570			"error: transition from %d to %d not allowed\n",
 571			sdev->dsp_power_state.state, target_state->state);
 572		return -EINVAL;
 573	default:
 574		dev_err(sdev->dev, "error: target state unsupported %d\n",
 575			target_state->state);
 576		return -EINVAL;
 577	}
 578	if (ret < 0) {
 579		dev_err(sdev->dev,
 580			"failed to set requested target DSP state %d substate %d\n",
 581			target_state->state, target_state->substate);
 582		return ret;
 583	}
 584
 585	sdev->dsp_power_state = *target_state;
 586	hda_dsp_state_log(sdev);
 587	return ret;
 588}
 589
 590/*
 591 * Audio DSP states may transform as below:-
 592 *
 593 *                                         Opportunistic D0I3 in S0
 594 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 595 *     suspend    |                     +--------------------+
 596 *   +------------+       D0I0(active)  |                    |
 597 *   |            |                     <---------------+    |
 598 *   |   +-------->                     |    New IPC	|    |
 599 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
 600 *   |   |resume     |  |         |  |			|    |
 601 *   |   |           |  |         |  |			|    |
 602 *   |   |     System|  |         |  |			|    |
 603 *   |   |     resume|  | S3/S0IX |  |                  |    |
 604 *   |   |	     |  | suspend |  | S0IX             |    |
 605 *   |   |           |  |         |  |suspend           |    |
 606 *   |   |           |  |         |  |                  |    |
 607 *   |   |           |  |         |  |                  |    |
 608 * +-v---+-----------+--v-------+ |  |           +------+----v----+
 609 * |                            | |  +----------->                |
 610 * |       D3 (suspended)       | |              |      D0I3      |
 611 * |                            | +--------------+                |
 612 * |                            |  System resume |                |
 613 * +----------------------------+		 +----------------+
 614 *
 615 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 616 *		 ignored the suspend trigger. Otherwise the DSP
 617 *		 is in D3.
 618 */
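/*
 * In short: runtime suspend moves the DSP from D0I0 to D3 and runtime or
 * system resume brings it back; the delayed D0I3 work timeout or an S0IX
 * suspend moves it from D0I0 to D0I3, and a new IPC returns it to D0I0.
 */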
 619
 620static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
 621{
 622	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 623	const struct sof_intel_dsp_desc *chip = hda->desc;
 624	struct hdac_bus *bus = sof_to_bus(sdev);
 625	int ret, j;
 626
 627	/*
 628	 * The memory used for IMR boot loses its content in states deeper than S3
 629	 * We must not try IMR boot on next power up (as it will fail).
 630	 *
 631	 * In case of firmware crash or boot failure set the skip_imr_boot to true
 632	 * as well in order to try to re-load the firmware to do a 'cold' boot.
 633	 */
 634	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
 635	    sdev->fw_state == SOF_FW_CRASHED ||
 636	    sdev->fw_state == SOF_FW_BOOT_FAILED)
 637		hda->skip_imr_boot = true;
 638
 639	ret = chip->disable_interrupts(sdev);
 640	if (ret < 0)
 641		return ret;
 642
 643	hda_codec_jack_wake_enable(sdev, runtime_suspend);
 644
 645	/* power down all hda links */
 646	hda_bus_ml_suspend(bus);
 647
 648	ret = chip->power_down_dsp(sdev);
 649	if (ret < 0) {
 650		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
 651		return ret;
 652	}
 653
 654	/* reset ref counts for all cores */
 655	for (j = 0; j < chip->cores_num; j++)
 656		sdev->dsp_core_ref_count[j] = 0;
 657
 658	/* disable ppcap interrupt */
 659	hda_dsp_ctrl_ppcap_enable(sdev, false);
 660	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
 661
 662	/* disable hda bus irq and streams */
 663	hda_dsp_ctrl_stop_chip(sdev);
 664
 665	/* disable LP retention mode */
 666	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
 667				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
 668
 669	/* reset controller */
 670	ret = hda_dsp_ctrl_link_reset(sdev, true);
 671	if (ret < 0) {
 672		dev_err(sdev->dev,
 673			"error: failed to reset controller during suspend\n");
 674		return ret;
 675	}
 676
 677	/* display codec can be powered off after link reset */
 678	hda_codec_i915_display_power(sdev, false);
 679
 680	return 0;
 681}
 682
 683static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 684{
 685	int ret;
 686
 687	/* display codec must be powered before link reset */
 688	hda_codec_i915_display_power(sdev, true);
 689
 690	/*
 691	 * clear TCSEL to clear playback on some HD Audio
 692	 * codecs. PCI TCSEL is defined in the Intel manuals.
 693	 */
 694	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
 695
 696	/* reset and start hda controller */
 697	ret = hda_dsp_ctrl_init_chip(sdev);
 698	if (ret < 0) {
 699		dev_err(sdev->dev,
 700			"error: failed to start controller after resume\n");
 701		goto cleanup;
 702	}
 703
 704	/* check jack status */
 705	if (runtime_resume) {
 706		hda_codec_jack_wake_enable(sdev, false);
 707		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
 708			hda_codec_jack_check(sdev);
 709	}
 710
 711	/* enable ppcap interrupt */
 712	hda_dsp_ctrl_ppcap_enable(sdev, true);
 713	hda_dsp_ctrl_ppcap_int_enable(sdev, true);
 714
 715cleanup:
 716	/* display codec can powered off after controller init */
 717	hda_codec_i915_display_power(sdev, false);
 718
 719	return 0;
 720}
 721
 722int hda_dsp_resume(struct snd_sof_dev *sdev)
 723{
 724	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 725	struct hdac_bus *bus = sof_to_bus(sdev);
 726	struct pci_dev *pci = to_pci_dev(sdev->dev);
 727	const struct sof_dsp_power_state target_state = {
 728		.state = SOF_DSP_PM_D0,
 729		.substate = SOF_HDA_DSP_PM_D0I0,
 730	};
 731	int ret;
 732
 733	/* resume from D0I3 */
 734	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
 735		ret = hda_bus_ml_resume(bus);
 736		if (ret < 0) {
 737			dev_err(sdev->dev,
 738				"error %d in %s: failed to power up links",
 739				ret, __func__);
 740			return ret;
 741		}
 742
 743		/* set up CORB/RIRB buffers if they were on before suspend */
 744		hda_codec_resume_cmd_io(sdev);
 745
 746		/* Set DSP power state */
 747		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
 748		if (ret < 0) {
 749			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
 750				target_state.state, target_state.substate);
 751			return ret;
 752		}
 753
 754		/* restore L1SEN bit */
 755		if (hda->l1_support_changed)
 756			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
 757						HDA_VS_INTEL_EM2,
 758						HDA_VS_INTEL_EM2_L1SEN, 0);
 759
 760		/* restore and disable the system wakeup */
 761		pci_restore_state(pci);
 762		disable_irq_wake(pci->irq);
 763		return 0;
 764	}
 765
 766	/* init hda controller. DSP cores will be powered up during fw boot */
 767	ret = hda_resume(sdev, false);
 768	if (ret < 0)
 769		return ret;
 770
 771	return snd_sof_dsp_set_power_state(sdev, &target_state);
 772}
 773
 774int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
 775{
 776	const struct sof_dsp_power_state target_state = {
 777		.state = SOF_DSP_PM_D0,
 778	};
 779	int ret;
 780
 781	/* init hda controller. DSP cores will be powered up during fw boot */
 782	ret = hda_resume(sdev, true);
 783	if (ret < 0)
 784		return ret;
 785
 786	return snd_sof_dsp_set_power_state(sdev, &target_state);
 787}
 788
 789int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
 790{
 791	struct hdac_bus *hbus = sof_to_bus(sdev);
 792
 793	if (hbus->codec_powered) {
 794		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
 795			(unsigned int)hbus->codec_powered);
 796		return -EBUSY;
 797	}
 798
 799	return 0;
 800}
 801
 802int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
 803{
 804	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 805	const struct sof_dsp_power_state target_state = {
 806		.state = SOF_DSP_PM_D3,
 807	};
 808	int ret;
 809
 810	/* cancel any attempt for DSP D0I3 */
 811	cancel_delayed_work_sync(&hda->d0i3_work);
 812
 813	/* stop hda controller and power dsp off */
 814	ret = hda_suspend(sdev, true);
 815	if (ret < 0)
 816		return ret;
 817
 818	return snd_sof_dsp_set_power_state(sdev, &target_state);
 819}
 820
 821int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
 822{
 823	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 824	struct hdac_bus *bus = sof_to_bus(sdev);
 825	struct pci_dev *pci = to_pci_dev(sdev->dev);
 826	const struct sof_dsp_power_state target_dsp_state = {
 827		.state = target_state,
 828		.substate = target_state == SOF_DSP_PM_D0 ?
 829				SOF_HDA_DSP_PM_D0I3 : 0,
 830	};
 831	int ret;
 832
 833	/* cancel any attempt for DSP D0I3 */
 834	cancel_delayed_work_sync(&hda->d0i3_work);
 835
 836	if (target_state == SOF_DSP_PM_D0) {
 837		/* Set DSP power state */
 838		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 839		if (ret < 0) {
 840			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
 841				target_dsp_state.state,
 842				target_dsp_state.substate);
 843			return ret;
 844		}
 845
 846		/* enable L1SEN to make sure the system can enter S0Ix */
 847		hda->l1_support_changed =
 848			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
 849						HDA_VS_INTEL_EM2,
 850						HDA_VS_INTEL_EM2_L1SEN,
 851						HDA_VS_INTEL_EM2_L1SEN);
 852
 853		/* stop the CORB/RIRB DMA if it is On */
 854		hda_codec_suspend_cmd_io(sdev);
 855
 856		/* no link can be powered in s0ix state */
 857		ret = hda_bus_ml_suspend(bus);
 858		if (ret < 0) {
 859			dev_err(sdev->dev,
 860				"error %d in %s: failed to power down links",
 861				ret, __func__);
 862			return ret;
 863		}
 864
 865		/* enable the system waking up via IPC IRQ */
 866		enable_irq_wake(pci->irq);
 867		pci_save_state(pci);
 868		return 0;
 869	}
 870
 871	/* stop hda controller and power dsp off */
 872	ret = hda_suspend(sdev, false);
 873	if (ret < 0) {
 874		dev_err(bus->dev, "error: suspending dsp\n");
 875		return ret;
 876	}
 877
 878	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 879}
 880
 881static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
 882{
 883	struct hdac_bus *bus = sof_to_bus(sdev);
 884	struct hdac_stream *s;
 885	unsigned int active_streams = 0;
 886	int sd_offset;
 887	u32 val;
 888
 889	list_for_each_entry(s, &bus->stream_list, list) {
 890		sd_offset = SOF_STREAM_SD_OFFSET(s);
 891		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
 892				       sd_offset);
 893		if (val & SOF_HDA_SD_CTL_DMA_START)
 894			active_streams |= BIT(s->index);
 895	}
 896
 897	return active_streams;
 898}
 899
 900static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
 901{
 902	int ret;
 903
 904	/*
 905	 * Do not assume a certain timing between the prior
 906	 * suspend flow, and running of this quirk function.
 907	 * This is needed if the controller was just put
 908	 * to reset before calling this function.
 909	 */
 910	usleep_range(500, 1000);
 911
 912	/*
 913	 * Take controller out of reset to flush DMA
 914	 * transactions.
 915	 */
 916	ret = hda_dsp_ctrl_link_reset(sdev, false);
 917	if (ret < 0)
 918		return ret;
 919
 920	usleep_range(500, 1000);
 921
 922	/* Restore state for shutdown, back to reset */
 923	ret = hda_dsp_ctrl_link_reset(sdev, true);
 924	if (ret < 0)
 925		return ret;
 926
 927	return ret;
 928}
 929
 930int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
 931{
 932	unsigned int active_streams;
 933	int ret, ret2;
 934
 935	/* check if DMA cleanup has been successful */
 936	active_streams = hda_dsp_check_for_dma_streams(sdev);
 937
 938	sdev->system_suspend_target = SOF_SUSPEND_S3;
 939	ret = snd_sof_suspend(sdev->dev);
 940
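	/*
	 * If any stream still had its DMA running, the suspend above did not
	 * stop it cleanly; run the S5 quirk below, which briefly takes the
	 * link out of reset to flush the pending transactions.
	 */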
 941	if (active_streams) {
 942		dev_warn(sdev->dev,
 943			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
 944			 active_streams);
 945		ret2 = hda_dsp_s5_quirk(sdev);
 946		if (ret2 < 0)
 947			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
 948	}
 949
 950	return ret;
 951}
 952
 953int hda_dsp_shutdown(struct snd_sof_dev *sdev)
 954{
 955	sdev->system_suspend_target = SOF_SUSPEND_S3;
 956	return snd_sof_suspend(sdev->dev);
 957}
 958
 959int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
 960{
 961	int ret;
 962
 963	/* make sure all DAI resources are freed */
 964	ret = hda_dsp_dais_suspend(sdev);
 965	if (ret < 0)
 966		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
 967
 968	return ret;
 969}
 970
 971void hda_dsp_d0i3_work(struct work_struct *work)
 972{
 973	struct sof_intel_hda_dev *hdev = container_of(work,
 974						      struct sof_intel_hda_dev,
 975						      d0i3_work.work);
 976	struct hdac_bus *bus = &hdev->hbus.core;
 977	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
 978	struct sof_dsp_power_state target_state = {
 979		.state = SOF_DSP_PM_D0,
 980		.substate = SOF_HDA_DSP_PM_D0I3,
 981	};
 982	int ret;
 983
 984	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
 985	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
 986		/* remain in D0I0 */
 987		return;
 988
 989	/* This can fail but error cannot be propagated */
 990	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
 991	if (ret < 0)
 992		dev_err_ratelimited(sdev->dev,
 993				    "error: failed to set DSP state %d substate %d\n",
 994				    target_state.state, target_state.substate);
 995}
 996
 997int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
 998{
 999	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1000	int ret, ret1;
1001
1002	/* power up core */
1003	ret = hda_dsp_enable_core(sdev, BIT(core));
1004	if (ret < 0) {
1005		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1006			core, ret);
1007		return ret;
1008	}
1009
1010	/* No need to send IPC for primary core or if FW boot is not complete */
1011	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1012		return 0;
1013
1014	/* No need to continue if the set_core_state op is not available */
1015	if (!pm_ops->set_core_state)
1016		return 0;
1017
1018	/* Now notify DSP for secondary cores */
1019	ret = pm_ops->set_core_state(sdev, core, true);
1020	if (ret < 0) {
1021		dev_err(sdev->dev, "failed to enable secondary core '%d' with err: %d\n",
1022			core, ret);
1023		goto power_down;
1024	}
1025
1026	return ret;
1027
1028power_down:
1029	/* power down core if it is host managed and return the original error if this fails too */
1030	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1031	if (ret1 < 0)
1032		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1033
1034	return ret;
1035}
1036
1037int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
1038{
1039	hda_sdw_int_enable(sdev, false);
1040	hda_dsp_ipc_int_disable(sdev);
1041
1042	return 0;
1043}
v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2//
   3// This file is provided under a dual BSD/GPLv2 license.  When using or
   4// redistributing this file, you may do so under either license.
   5//
   6// Copyright(c) 2018 Intel Corporation. All rights reserved.
   7//
   8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
   9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
  10//	    Rander Wang <rander.wang@intel.com>
  11//          Keyon Jie <yang.jie@linux.intel.com>
  12//
  13
  14/*
  15 * Hardware interface for generic Intel audio DSP HDA IP
  16 */
  17
  18#include <linux/module.h>
  19#include <sound/hdaudio_ext.h>
  20#include <sound/hda_register.h>
  21#include <sound/hda-mlink.h>
  22#include <trace/events/sof_intel.h>
  23#include "../sof-audio.h"
  24#include "../ops.h"
  25#include "hda.h"
  26#include "hda-ipc.h"
  27
  28static bool hda_enable_trace_D0I3_S0;
  29#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
  30module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
  31MODULE_PARM_DESC(enable_trace_D0I3_S0,
  32		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
  33#endif
  34
  35/*
  36 * DSP Core control.
  37 */
  38
  39static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
  40{
  41	u32 adspcs;
  42	u32 reset;
  43	int ret;
  44
  45	/* set reset bits for cores */
  46	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  47	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  48					 HDA_DSP_REG_ADSPCS,
  49					 reset, reset);
  50
  51	/* poll with timeout to check if operation successful */
  52	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  53					HDA_DSP_REG_ADSPCS, adspcs,
  54					((adspcs & reset) == reset),
  55					HDA_DSP_REG_POLL_INTERVAL_US,
  56					HDA_DSP_RESET_TIMEOUT_US);
  57	if (ret < 0) {
  58		dev_err(sdev->dev,
  59			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
  60			__func__);
  61		return ret;
  62	}
  63
  64	/* has core entered reset ? */
  65	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
  66				  HDA_DSP_REG_ADSPCS);
  67	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
  68		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
  69		dev_err(sdev->dev,
  70			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
  71			core_mask, adspcs);
  72		ret = -EIO;
  73	}
  74
  75	return ret;
  76}
  77
  78static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
  79{
  80	unsigned int crst;
  81	u32 adspcs;
  82	int ret;
  83
  84	/* clear reset bits for cores */
  85	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  86					 HDA_DSP_REG_ADSPCS,
  87					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
  88					 0);
  89
  90	/* poll with timeout to check if operation successful */
  91	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  92	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  93					    HDA_DSP_REG_ADSPCS, adspcs,
  94					    !(adspcs & crst),
  95					    HDA_DSP_REG_POLL_INTERVAL_US,
  96					    HDA_DSP_RESET_TIMEOUT_US);
  97
  98	if (ret < 0) {
  99		dev_err(sdev->dev,
 100			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 101			__func__);
 102		return ret;
 103	}
 104
 105	/* has core left reset ? */
 106	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 107				  HDA_DSP_REG_ADSPCS);
 108	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
 109		dev_err(sdev->dev,
 110			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
 111			core_mask, adspcs);
 112		ret = -EIO;
 113	}
 114
 115	return ret;
 116}
 117
 118int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
 119{
 120	/* stall core */
 121	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 122					 HDA_DSP_REG_ADSPCS,
 123					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 124					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 125
 126	/* set reset state */
 127	return hda_dsp_core_reset_enter(sdev, core_mask);
 128}
 129
 130bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
 131{
 132	int val;
 133	bool is_enable;
 134
 135	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 136
 137#define MASK_IS_EQUAL(v, m, field) ({	\
 138	u32 _m = field(m);		\
 139	((v) & _m) == _m;		\
 140})
 141
 142	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
 143		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
 144		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
 145		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 146
 147#undef MASK_IS_EQUAL
 148
 149	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
 150		is_enable, core_mask);
 151
 152	return is_enable;
 153}
 154
 155int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
 156{
 157	int ret;
 158
 159	/* leave reset state */
 160	ret = hda_dsp_core_reset_leave(sdev, core_mask);
 161	if (ret < 0)
 162		return ret;
 163
 164	/* run core */
 165	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
 166	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 167					 HDA_DSP_REG_ADSPCS,
 168					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 169					 0);
 170
 171	/* is core now running ? */
 172	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
 173		hda_dsp_core_stall_reset(sdev, core_mask);
 174		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
 175			core_mask);
 176		ret = -EIO;
 177	}
 178
 179	return ret;
 180}
 181
 182/*
 183 * Power Management.
 184 */
 185
 186int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
 187{
 188	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 189	const struct sof_intel_dsp_desc *chip = hda->desc;
 190	unsigned int cpa;
 191	u32 adspcs;
 192	int ret;
 193
 194	/* restrict core_mask to host managed cores mask */
 195	core_mask &= chip->host_managed_cores_mask;
 196	/* return if core_mask is not valid */
 197	if (!core_mask)
 198		return 0;
 199
 200	/* update bits */
 201	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
 202				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
 203				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
 204
 205	/* poll with timeout to check if operation successful */
 206	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
 207	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 208					    HDA_DSP_REG_ADSPCS, adspcs,
 209					    (adspcs & cpa) == cpa,
 210					    HDA_DSP_REG_POLL_INTERVAL_US,
 211					    HDA_DSP_RESET_TIMEOUT_US);
 212	if (ret < 0) {
 213		dev_err(sdev->dev,
 214			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 215			__func__);
 216		return ret;
 217	}
 218
 219	/* did core power up ? */
 220	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 221				  HDA_DSP_REG_ADSPCS);
 222	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
 223		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
 224		dev_err(sdev->dev,
 225			"error: power up core failed core_mask %x adspcs 0x%x\n",
 226			core_mask, adspcs);
 227		ret = -EIO;
 228	}
 229
 230	return ret;
 231}
 232
 233static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
 234{
 235	u32 adspcs;
 236	int ret;
 237
 238	/* update bits */
 239	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 240					 HDA_DSP_REG_ADSPCS,
 241					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
 242
 243	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 244				HDA_DSP_REG_ADSPCS, adspcs,
 245				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
 246				HDA_DSP_REG_POLL_INTERVAL_US,
 247				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
 248	if (ret < 0)
 249		dev_err(sdev->dev,
 250			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 251			__func__);
 252
 253	return ret;
 254}
 255
 256int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
 257{
 258	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 259	const struct sof_intel_dsp_desc *chip = hda->desc;
 260	int ret;
 261
 262	/* restrict core_mask to host managed cores mask */
 263	core_mask &= chip->host_managed_cores_mask;
 264
 265	/* return if core_mask is not valid or cores are already enabled */
 266	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
 267		return 0;
 268
 269	/* power up */
 270	ret = hda_dsp_core_power_up(sdev, core_mask);
 271	if (ret < 0) {
 272		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
 273			core_mask);
 274		return ret;
 275	}
 276
 277	return hda_dsp_core_run(sdev, core_mask);
 278}
 279
 280int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
 281				  unsigned int core_mask)
 282{
 283	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 284	const struct sof_intel_dsp_desc *chip = hda->desc;
 285	int ret;
 286
 287	/* restrict core_mask to host managed cores mask */
 288	core_mask &= chip->host_managed_cores_mask;
 289
 290	/* return if core_mask is not valid */
 291	if (!core_mask)
 292		return 0;
 293
 294	/* place core in reset prior to power down */
 295	ret = hda_dsp_core_stall_reset(sdev, core_mask);
 296	if (ret < 0) {
 297		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
 298			core_mask);
 299		return ret;
 300	}
 301
 302	/* power down core */
 303	ret = hda_dsp_core_power_down(sdev, core_mask);
 304	if (ret < 0) {
 305		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
 306			core_mask, ret);
 307		return ret;
 308	}
 309
 310	/* make sure we are in OFF state */
 311	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
 312		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
 313			core_mask, ret);
 314		ret = -EIO;
 315	}
 316
 317	return ret;
 318}
 319
 320void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
 321{
 322	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 323	const struct sof_intel_dsp_desc *chip = hda->desc;
 324
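	/* nothing to do when the DSP is not in use (DSP-less mode) */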
 325	if (sdev->dspless_mode_selected)
 326		return;
 327
 328	/* enable IPC DONE and BUSY interrupts */
 329	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 330			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
 331			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
 332
 333	/* enable IPC interrupt */
 334	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 335				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
 336}
 337
 338void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
 339{
 340	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 341	const struct sof_intel_dsp_desc *chip = hda->desc;
 342
 343	if (sdev->dspless_mode_selected)
 344		return;
 345
 346	/* disable IPC interrupt */
 347	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 348				HDA_DSP_ADSPIC_IPC, 0);
 349
 350	/* disable IPC BUSY and DONE interrupt */
 351	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 352			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
 353}
 354
 355static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
 356{
 357	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
 358	struct snd_sof_pdata *pdata = sdev->pdata;
 359	const struct sof_intel_dsp_desc *chip;
 360
 361	chip = get_chip_info(pdata);
 362	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
 363		SOF_HDA_VS_D0I3C_CIP) {
 364		if (!retry--)
 365			return -ETIMEDOUT;
 366		usleep_range(10, 15);
 367	}
 368
 369	return 0;
 370}
 371
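/*
 * The PM_GATE request is delegated to the IPC-specific pm_ops; if the ops
 * does not provide set_pm_gate, there is nothing to send.
 */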
 372static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
 373{
 374	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
 375
 376	if (pm_ops && pm_ops->set_pm_gate)
 377		return pm_ops->set_pm_gate(sdev, flags);
 378
 379	return 0;
 380}
 381
 382static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
 383{
 384	struct snd_sof_pdata *pdata = sdev->pdata;
 385	const struct sof_intel_dsp_desc *chip;
 386	int ret;
 387	u8 reg;
 388
 389	chip = get_chip_info(pdata);
 390
 391	/* Write to D0I3C after Command-In-Progress bit is cleared */
 392	ret = hda_dsp_wait_d0i3c_done(sdev);
 393	if (ret < 0) {
 394		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
 395		return ret;
 396	}
 397
 398	/* Update D0I3C register */
 399	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
 400			    SOF_HDA_VS_D0I3C_I3, value);
 401
 402	/*
 403	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
 404	 * A delay is recommended before checking if D0I3C::CIP is cleared
 405	 */
 406	usleep_range(30, 40);
 407
 408	/* Wait for cmd in progress to be cleared before exiting the function */
 409	ret = hda_dsp_wait_d0i3c_done(sdev);
 410	if (ret < 0) {
 411		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
 412		return ret;
 413	}
 414
 415	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
 416	/* Confirm d0i3 state changed with paranoia check */
 417	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
 418		dev_err(sdev->dev, "failed to update D0I3C!\n");
 419		return -EIO;
 420	}
 421
 422	trace_sof_intel_D0I3C_updated(sdev, reg);
 423
 424	return 0;
 425}
 426
 427/*
 428 * d0i3 streaming is enabled if all the active streams can
 429 * work in d0i3 state and playback is enabled
 430 */
 431static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
 432{
 433	struct snd_pcm_substream *substream;
 434	struct snd_sof_pcm *spcm;
 435	bool playback_active = false;
 436	int dir;
 437
 438	list_for_each_entry(spcm, &sdev->pcm_list, list) {
 439		for_each_pcm_streams(dir) {
 440			substream = spcm->stream[dir].substream;
 441			if (!substream || !substream->runtime)
 442				continue;
 443
 444			if (!spcm->stream[dir].d0i3_compatible)
 445				return false;
 446
 447			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
 448				playback_active = true;
 449		}
 450	}
 451
 452	return playback_active;
 453}
 454
 455static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
 456				const struct sof_dsp_power_state *target_state)
 457{
 458	u32 flags = 0;
 459	int ret;
 460	u8 value = 0;
 461
 462	/*
 463	 * Sanity check for illegal state transitions
 464	 * The only allowed transitions are:
 465	 * 1. D3 -> D0I0
 466	 * 2. D0I0 -> D0I3
 467	 * 3. D0I3 -> D0I0
 468	 */
 469	switch (sdev->dsp_power_state.state) {
 470	case SOF_DSP_PM_D0:
 471		/* Follow the sequence below for D0 substate transitions */
 472		break;
 473	case SOF_DSP_PM_D3:
 474		/* Follow regular flow for D3 -> D0 transition */
 475		return 0;
 476	default:
 477		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
 478			sdev->dsp_power_state.state, target_state->state);
 479		return -EINVAL;
 480	}
 481
 482	/* Set flags and register value for D0 target substate */
 483	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
 484		value = SOF_HDA_VS_D0I3C_I3;
 485
 486		/*
 487		 * Trace DMA needs to be disabled when the DSP enters
 488		 * D0I3 for S0Ix suspend, but it can be kept enabled
 489		 * when the DSP enters D0I3 while the system is in S0
 490		 * for debug purposes.
 491		 */
 492		if (!sdev->fw_trace_is_supported ||
 493		    !hda_enable_trace_D0I3_S0 ||
 494		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
 495			flags = HDA_PM_NO_DMA_TRACE;
 496
 497		if (hda_dsp_d0i3_streaming_applicable(sdev))
 498			flags |= HDA_PM_PG_STREAMING;
 499	} else {
 500		/* prevent power gating in D0I0 */
 501		flags = HDA_PM_PPG;
 502	}
 503
 504	/* update D0I3C register */
 505	ret = hda_dsp_update_d0i3c_register(sdev, value);
 506	if (ret < 0)
 507		return ret;
 508
 509	/*
 510	 * Notify the DSP of the state change.
 511	 * If this IPC fails, revert the D0I3C register update in order
 512	 * to prevent partial state change.
 513	 */
 514	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
 515	if (ret < 0) {
 516		dev_err(sdev->dev,
 517			"error: PM_GATE ipc error %d\n", ret);
 518		goto revert;
 519	}
 520
 521	return ret;
 522
 523revert:
 524	/* fallback to the previous register value */
 525	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
 526
 527	/*
 528	 * This can fail but return the IPC error to signal that
 529	 * the state change failed.
 530	 */
 531	hda_dsp_update_d0i3c_register(sdev, value);
 532
 533	return ret;
 534}
 535
 536/* helper to log DSP state */
 537static void hda_dsp_state_log(struct snd_sof_dev *sdev)
 538{
 539	switch (sdev->dsp_power_state.state) {
 540	case SOF_DSP_PM_D0:
 541		switch (sdev->dsp_power_state.substate) {
 542		case SOF_HDA_DSP_PM_D0I0:
 543			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
 544			break;
 545		case SOF_HDA_DSP_PM_D0I3:
 546			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
 547			break;
 548		default:
 549			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
 550				sdev->dsp_power_state.substate);
 551			break;
 552		}
 553		break;
 554	case SOF_DSP_PM_D1:
 555		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
 556		break;
 557	case SOF_DSP_PM_D2:
 558		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
 559		break;
 560	case SOF_DSP_PM_D3:
 561		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
 562		break;
 563	default:
 564		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
 565			sdev->dsp_power_state.state);
 566		break;
 567	}
 568}
 569
 570/*
 571 * All DSP power state transitions are initiated by the driver.
 572 * If the requested state change fails, the error is simply returned.
 573 * Further state transitions are attempted only when the set_power_save() op
 574 * is called again either because of a new IPC sent to the DSP or
 575 * during system suspend/resume.
 576 */
 577static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
 578				   const struct sof_dsp_power_state *target_state)
 579{
 580	int ret = 0;
 581
 582	switch (target_state->state) {
 583	case SOF_DSP_PM_D0:
 584		ret = hda_dsp_set_D0_state(sdev, target_state);
 585		break;
 586	case SOF_DSP_PM_D3:
 587		/* The only allowed transition is: D0I0 -> D3 */
 588		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
 589		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
 590			break;
 591
 592		dev_err(sdev->dev,
 593			"error: transition from %d to %d not allowed\n",
 594			sdev->dsp_power_state.state, target_state->state);
 595		return -EINVAL;
 596	default:
 597		dev_err(sdev->dev, "error: target state unsupported %d\n",
 598			target_state->state);
 599		return -EINVAL;
 600	}
 601	if (ret < 0) {
 602		dev_err(sdev->dev,
 603			"failed to set requested target DSP state %d substate %d\n",
 604			target_state->state, target_state->substate);
 605		return ret;
 606	}
 607
 608	sdev->dsp_power_state = *target_state;
 609	hda_dsp_state_log(sdev);
 610	return ret;
 611}
 612
 613int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
 614				 const struct sof_dsp_power_state *target_state)
 615{
 616	/*
 617	 * When the DSP is already in D0I3 and the target state is D0I3,
 618	 * it could be the case that the DSP is in D0I3 during S0
 619	 * and the system is suspending to S0Ix. Therefore,
 620	 * hda_dsp_set_D0_state() must be called to disable trace DMA
 621	 * by sending the PM_GATE IPC to the FW.
 622	 */
 623	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
 624	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
 625		return hda_dsp_set_power_state(sdev, target_state);
 626
 627	/*
 628	 * For all other cases, return without doing anything if
 629	 * the DSP is already in the target state.
 630	 */
 631	if (target_state->state == sdev->dsp_power_state.state &&
 632	    target_state->substate == sdev->dsp_power_state.substate)
 633		return 0;
 634
 635	return hda_dsp_set_power_state(sdev, target_state);
 636}
 637
 638int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
 639				 const struct sof_dsp_power_state *target_state)
 640{
 641	/* Return without doing anything if the DSP is already in the target state */
 642	if (target_state->state == sdev->dsp_power_state.state &&
 643	    target_state->substate == sdev->dsp_power_state.substate)
 644		return 0;
 645
 646	return hda_dsp_set_power_state(sdev, target_state);
 647}
 648
 649/*
 650 * Audio DSP states may transform as below:-
 651 *
 652 *                                         Opportunistic D0I3 in S0
 653 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 654 *     suspend    |                     +--------------------+
 655 *   +------------+       D0I0(active)  |                    |
 656 *   |            |                     <---------------+    |
 657 *   |   +-------->                     |    New IPC	|    |
 658 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
 659 *   |   |resume     |  |         |  |			|    |
 660 *   |   |           |  |         |  |			|    |
 661 *   |   |     System|  |         |  |			|    |
 662 *   |   |     resume|  | S3/S0IX |  |                  |    |
 663 *   |   |	     |  | suspend |  | S0IX             |    |
 664 *   |   |           |  |         |  |suspend           |    |
 665 *   |   |           |  |         |  |                  |    |
 666 *   |   |           |  |         |  |                  |    |
 667 * +-v---+-----------+--v-------+ |  |           +------+----v----+
 668 * |                            | |  +----------->                |
 669 * |       D3 (suspended)       | |              |      D0I3      |
 670 * |                            | +--------------+                |
 671 * |                            |  System resume |                |
 672 * +----------------------------+		 +----------------+
 673 *
 674 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 675 *		 ignored the suspend trigger. Otherwise the DSP
 676 *		 is in D3.
 677 */
 678
 679static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
 680{
 681	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 682	const struct sof_intel_dsp_desc *chip = hda->desc;
 683	struct hdac_bus *bus = sof_to_bus(sdev);
 684	int ret, j;
 685
 686	/*
 687	 * The memory used for IMR boot loses its content in states deeper than S3
 688	 * We must not try IMR boot on next power up (as it will fail).
 689	 *
 690	 * In case of firmware crash or boot failure set the skip_imr_boot to true
 691	 * as well in order to try to re-load the firmware to do a 'cold' boot.
 692	 */
 693	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
 694	    sdev->fw_state == SOF_FW_CRASHED ||
 695	    sdev->fw_state == SOF_FW_BOOT_FAILED)
 696		hda->skip_imr_boot = true;
 697
 698	ret = chip->disable_interrupts(sdev);
 699	if (ret < 0)
 700		return ret;
 701
 702	/* make sure that no irq handler is pending before shutdown */
 703	synchronize_irq(sdev->ipc_irq);
 704
 705	hda_codec_jack_wake_enable(sdev, runtime_suspend);
 706
 707	/* power down all hda links */
 708	hda_bus_ml_suspend(bus);
 709
 710	if (sdev->dspless_mode_selected)
 711		goto skip_dsp;
 712
 713	ret = chip->power_down_dsp(sdev);
 714	if (ret < 0) {
 715		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
 716		return ret;
 717	}
 718
 719	/* reset ref counts for all cores */
 720	for (j = 0; j < chip->cores_num; j++)
 721		sdev->dsp_core_ref_count[j] = 0;
 722
 723	/* disable ppcap interrupt */
 724	hda_dsp_ctrl_ppcap_enable(sdev, false);
 725	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
 726skip_dsp:
 727
 728	/* disable hda bus irq and streams */
 729	hda_dsp_ctrl_stop_chip(sdev);
 730
 731	/* disable LP retention mode */
 732	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
 733				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
 734
 735	/* reset controller */
 736	ret = hda_dsp_ctrl_link_reset(sdev, true);
 737	if (ret < 0) {
 738		dev_err(sdev->dev,
 739			"error: failed to reset controller during suspend\n");
 740		return ret;
 741	}
 742
 743	/* display codec can be powered off after link reset */
 744	hda_codec_i915_display_power(sdev, false);
 745
 746	return 0;
 747}
 748
 749static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 750{
 751	int ret;
 752
 753	/* display codec must be powered before link reset */
 754	hda_codec_i915_display_power(sdev, true);
 755
 756	/*
 757	 * clear TCSEL to clear playback on some HD Audio
 758	 * codecs. PCI TCSEL is defined in the Intel manuals.
 759	 */
 760	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
 761
 762	/* reset and start hda controller */
 763	ret = hda_dsp_ctrl_init_chip(sdev);
 764	if (ret < 0) {
 765		dev_err(sdev->dev,
 766			"error: failed to start controller after resume\n");
 767		goto cleanup;
 768	}
 769
 770	/* check jack status */
 771	if (runtime_resume) {
 772		hda_codec_jack_wake_enable(sdev, false);
 773		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
 774			hda_codec_jack_check(sdev);
 775	}
 776
 777	if (!sdev->dspless_mode_selected) {
 778		/* enable ppcap interrupt */
 779		hda_dsp_ctrl_ppcap_enable(sdev, true);
 780		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
 781	}
 782
 783cleanup:
 784	/* the display codec can be powered off after controller init */
 785	hda_codec_i915_display_power(sdev, false);
 786
 787	return 0;
 788}
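/*
 * Illustrative sketch (assumed equivalence, byte-wide access as in the legacy
 * HDA driver): the TCSEL clear in hda_resume() is a masked PCI config-space
 * update. Open-coded it would look roughly like:
 *
 *	u8 val;
 *
 *	pci_read_config_byte(pci, PCI_TCSEL, &val);
 *	val &= ~0x07;
 *	pci_write_config_byte(pci, PCI_TCSEL, val);
 *
 * snd_sof_pci_update_bits() wraps this read-modify-write for the driver.
 */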
 789
 790int hda_dsp_resume(struct snd_sof_dev *sdev)
 791{
 792	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 793	struct hdac_bus *bus = sof_to_bus(sdev);
 794	struct pci_dev *pci = to_pci_dev(sdev->dev);
 795	const struct sof_dsp_power_state target_state = {
 796		.state = SOF_DSP_PM_D0,
 797		.substate = SOF_HDA_DSP_PM_D0I0,
 798	};
 799	int ret;
 800
 801	/* resume from D0I3 */
 802	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
 803		ret = hda_bus_ml_resume(bus);
 804		if (ret < 0) {
 805			dev_err(sdev->dev,
 806				"error %d in %s: failed to power up links\n",
 807				ret, __func__);
 808			return ret;
 809		}
 810
 811		/* set up CORB/RIRB buffers if they were in use before suspend */
 812		hda_codec_resume_cmd_io(sdev);
 813
 814		/* Set DSP power state */
 815		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
 816		if (ret < 0) {
 817			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
 818				target_state.state, target_state.substate);
 819			return ret;
 820		}
 821
 822		/* restore L1SEN bit */
 823		if (hda->l1_disabled)
 824			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
 825						HDA_VS_INTEL_EM2,
 826						HDA_VS_INTEL_EM2_L1SEN, 0);
 827
 828		/* restore and disable the system wakeup */
 829		pci_restore_state(pci);
 830		disable_irq_wake(pci->irq);
 831		return 0;
 832	}
 833
 834	/* init hda controller. DSP cores will be powered up during fw boot */
 835	ret = hda_resume(sdev, false);
 836	if (ret < 0)
 837		return ret;
 838
 839	return snd_sof_dsp_set_power_state(sdev, &target_state);
 840}
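/*
 * Note on the D0 (D0I3 -> D0I0) branch above: it mirrors the S0ix branch of
 * hda_dsp_suspend() further down, and the PCI/wake calls are strictly paired
 * across the two paths:
 *
 *	S0ix suspend:	enable_irq_wake(pci->irq);
 *			pci_save_state(pci);
 *	S0ix resume:	pci_restore_state(pci);
 *			disable_irq_wake(pci->irq);
 *
 * so an IPC interrupt can wake the system while the HDA links stay down.
 */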
 841
 842int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
 843{
 844	const struct sof_dsp_power_state target_state = {
 845		.state = SOF_DSP_PM_D0,
 846	};
 847	int ret;
 848
 849	/* init hda controller. DSP cores will be powered up during fw boot */
 850	ret = hda_resume(sdev, true);
 851	if (ret < 0)
 852		return ret;
 853
 854	return snd_sof_dsp_set_power_state(sdev, &target_state);
 855}
 856
 857int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
 858{
 859	struct hdac_bus *hbus = sof_to_bus(sdev);
 860
 861	if (hbus->codec_powered) {
 862		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
 863			(unsigned int)hbus->codec_powered);
 864		return -EBUSY;
 865	}
 866
 867	return 0;
 868}
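/*
 * Illustrative sketch (assuming the standard runtime PM flow): returning
 * -EBUSY from the runtime_idle op vetoes the pending runtime suspend while
 * any codec is still powered. hbus->codec_powered is a bitmap of powered
 * codec addresses, so the individual codecs could be logged roughly like:
 *
 *	unsigned long powered = hbus->codec_powered;
 *	int addr;
 *
 *	for_each_set_bit(addr, &powered, HDA_MAX_CODECS)
 *		dev_dbg(sdev->dev, "codec address %d still powered\n", addr);
 *
 * HDA_MAX_CODECS is only a placeholder here for the codec address limit.
 */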
 869
 870int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
 871{
 872	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 873	const struct sof_dsp_power_state target_state = {
 874		.state = SOF_DSP_PM_D3,
 875	};
 876	int ret;
 877
 878	if (!sdev->dspless_mode_selected) {
 879		/* cancel any attempt for DSP D0I3 */
 880		cancel_delayed_work_sync(&hda->d0i3_work);
 881	}
 882
 883	/* stop hda controller and power dsp off */
 884	ret = hda_suspend(sdev, true);
 885	if (ret < 0)
 886		return ret;
 887
 888	return snd_sof_dsp_set_power_state(sdev, &target_state);
 889}
 890
 891int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
 892{
 893	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 894	struct hdac_bus *bus = sof_to_bus(sdev);
 895	struct pci_dev *pci = to_pci_dev(sdev->dev);
 896	const struct sof_dsp_power_state target_dsp_state = {
 897		.state = target_state,
 898		.substate = target_state == SOF_DSP_PM_D0 ?
 899				SOF_HDA_DSP_PM_D0I3 : 0,
 900	};
 901	int ret;
 902
 903	if (!sdev->dspless_mode_selected) {
 904		/* cancel any attempt for DSP D0I3 */
 905		cancel_delayed_work_sync(&hda->d0i3_work);
 906	}
 907
 908	if (target_state == SOF_DSP_PM_D0) {
 909		/* Set DSP power state */
 910		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 911		if (ret < 0) {
 912			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
 913				target_dsp_state.state,
 914				target_dsp_state.substate);
 915			return ret;
 916		}
 917
 918		/* enable L1SEN to make sure the system can enter S0Ix */
 919		if (hda->l1_disabled)
 920			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
 921						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
 922
 923		/* stop the CORB/RIRB DMA if it is running */
 924		hda_codec_suspend_cmd_io(sdev);
 925
 926		/* no link can be powered in s0ix state */
 927		ret = hda_bus_ml_suspend(bus);
 928		if (ret < 0) {
 929			dev_err(sdev->dev,
 930				"error %d in %s: failed to power down links\n",
 931				ret, __func__);
 932			return ret;
 933		}
 934
 935		/* enable the system waking up via IPC IRQ */
 936		enable_irq_wake(pci->irq);
 937		pci_save_state(pci);
 938		return 0;
 939	}
 940
 941	/* stop hda controller and power dsp off */
 942	ret = hda_suspend(sdev, false);
 943	if (ret < 0) {
 944		dev_err(bus->dev, "error: suspending dsp\n");
 945		return ret;
 946	}
 947
 948	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 949}
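/*
 * Note on the L1SEN handling above: when the platform runs with L1 disabled
 * (hda->l1_disabled), the bit is toggled symmetrically around S0ix so that
 * suspend is not blocked and L1 is disabled again on resume:
 *
 *	suspend: snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
 *					 HDA_VS_INTEL_EM2_L1SEN,
 *					 HDA_VS_INTEL_EM2_L1SEN);
 *	resume:  snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
 *					 HDA_VS_INTEL_EM2_L1SEN, 0);
 *
 * The resume half of the pair is in hda_dsp_resume() earlier in this file.
 */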
 950
 951static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
 952{
 953	struct hdac_bus *bus = sof_to_bus(sdev);
 954	struct hdac_stream *s;
 955	unsigned int active_streams = 0;
 956	int sd_offset;
 957	u32 val;
 958
 959	list_for_each_entry(s, &bus->stream_list, list) {
 960		sd_offset = SOF_STREAM_SD_OFFSET(s);
 961		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
 962				       sd_offset);
 963		if (val & SOF_HDA_SD_CTL_DMA_START)
 964			active_streams |= BIT(s->index);
 965	}
 966
 967	return active_streams;
 968}
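/*
 * Illustrative sketch: the value returned above is a per-stream bitmask, so
 * bit N set means stream N still had SOF_HDA_SD_CTL_DMA_START set. It can be
 * decoded roughly like:
 *
 *	unsigned long mask = hda_dsp_check_for_dma_streams(sdev);
 *	int i;
 *
 *	for_each_set_bit(i, &mask, BITS_PER_LONG)
 *		dev_dbg(sdev->dev, "stream %d DMA still running\n", i);
 */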
 969
 970static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
 971{
 972	int ret;
 973
 974	/*
 975	 * Do not assume any particular timing between the prior
 976	 * suspend flow and the running of this quirk function.
 977	 * The delay is needed in case the controller was put into
 978	 * reset just before this function was called.
 979	 */
 980	usleep_range(500, 1000);
 981
 982	/*
 983	 * Take controller out of reset to flush DMA
 984	 * transactions.
 985	 */
 986	ret = hda_dsp_ctrl_link_reset(sdev, false);
 987	if (ret < 0)
 988		return ret;
 989
 990	usleep_range(500, 1000);
 991
 992	/* Restore state for shutdown, back to reset */
 993	ret = hda_dsp_ctrl_link_reset(sdev, true);
 994	if (ret < 0)
 995		return ret;
 996
 997	return ret;
 998}
 999
1000int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
1001{
1002	unsigned int active_streams;
1003	int ret, ret2;
1004
1005	/* check if DMA cleanup has been successful */
1006	active_streams = hda_dsp_check_for_dma_streams(sdev);
1007
1008	sdev->system_suspend_target = SOF_SUSPEND_S3;
1009	ret = snd_sof_suspend(sdev->dev);
1010
1011	if (active_streams) {
1012		dev_warn(sdev->dev,
1013			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
1014			 active_streams);
1015		ret2 = hda_dsp_s5_quirk(sdev);
1016		if (ret2 < 0)
1017			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
1018	}
1019
1020	return ret;
1021}
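/*
 * Illustrative sketch (assumed wiring): hda_dsp_shutdown_dma_flush() and the
 * plain hda_dsp_shutdown() below are alternative .shutdown implementations
 * for struct snd_sof_dsp_ops. A platform that can reach shutdown with stream
 * DMA still running would pick the flush variant:
 *
 *	.shutdown = hda_dsp_shutdown_dma_flush,
 *
 * while others keep the simpler suspend-only shutdown.
 */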
1022
1023int hda_dsp_shutdown(struct snd_sof_dev *sdev)
1024{
1025	sdev->system_suspend_target = SOF_SUSPEND_S3;
1026	return snd_sof_suspend(sdev->dev);
1027}
1028
1029int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
1030{
1031	int ret;
1032
1033	/* make sure all DAI resources are freed */
1034	ret = hda_dsp_dais_suspend(sdev);
1035	if (ret < 0)
1036		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
1037
1038	return ret;
1039}
1040
1041void hda_dsp_d0i3_work(struct work_struct *work)
1042{
1043	struct sof_intel_hda_dev *hdev = container_of(work,
1044						      struct sof_intel_hda_dev,
1045						      d0i3_work.work);
1046	struct hdac_bus *bus = &hdev->hbus.core;
1047	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
1048	struct sof_dsp_power_state target_state = {
1049		.state = SOF_DSP_PM_D0,
1050		.substate = SOF_HDA_DSP_PM_D0I3,
1051	};
1052	int ret;
1053
1054	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
1055	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
1056		/* remain in D0I0 */
1057		return;
1058
1059	/* This can fail, but the error cannot be propagated from the work handler */
1060	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
1061	if (ret < 0)
1062		dev_err_ratelimited(sdev->dev,
1063				    "error: failed to set DSP state %d substate %d\n",
1064				    target_state.state, target_state.substate);
1065}
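/*
 * Illustrative sketch (the scheduling side is assumed, the cancellation side
 * is visible in the suspend paths above): d0i3_work is a delayed work item,
 * initialized once at probe time and queued a short debounce delay after the
 * last D0I0-only activity stops:
 *
 *	INIT_DELAYED_WORK(&hdev->d0i3_work, hda_dsp_d0i3_work);
 *	...
 *	schedule_delayed_work(&hdev->d0i3_work, msecs_to_jiffies(delay_ms));
 *	...
 *	cancel_delayed_work_sync(&hdev->d0i3_work);
 *
 * delay_ms here stands for the driver's D0I3 debounce delay; the exact
 * constant is defined outside this file.
 */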
1066
1067int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1068{
1069	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1070	int ret, ret1;
1071
1072	/* power up core */
1073	ret = hda_dsp_enable_core(sdev, BIT(core));
1074	if (ret < 0) {
1075		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1076			core, ret);
1077		return ret;
1078	}
1079
1080	/* No need to send IPC for primary core or if FW boot is not complete */
1081	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1082		return 0;
1083
1084	/* No need to continue if the set_core_state op is not available */
1085	if (!pm_ops->set_core_state)
1086		return 0;
1087
1088	/* Now notify DSP for secondary cores */
1089	ret = pm_ops->set_core_state(sdev, core, true);
1090	if (ret < 0) {
1091		dev_err(sdev->dev, "failed to enable secondary core '%d': error %d\n",
1092			core, ret);
1093		goto power_down;
1094	}
1095
1096	return ret;
1097
1098power_down:
1099	/* power down core if it is host managed and return the original error if this fails too */
1100	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1101	if (ret1 < 0)
1102		dev_err(sdev->dev, "failed to power down core %d with err: %d\n", core, ret1);
1103
1104	return ret;
1105}
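/*
 * Illustrative sketch (assuming the SOF core wrappers are used): this is the
 * HDA side of the core_get DSP op; the matching put path powers the core back
 * down via hda_dsp_core_reset_power_down(). A client that needs a secondary
 * core would bracket its use roughly like:
 *
 *	ret = snd_sof_dsp_core_get(sdev, core);
 *	if (ret < 0)
 *		return ret;
 *
 *	... run workload on the core ...
 *
 *	snd_sof_dsp_core_put(sdev, core);
 */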
1106
1107int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
1108{
1109	hda_sdw_int_enable(sdev, false);
1110	hda_dsp_ipc_int_disable(sdev);
1111
1112	return 0;
1113}