v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2//
   3// This file is provided under a dual BSD/GPLv2 license.  When using or
   4// redistributing this file, you may do so under either license.
   5//
   6// Copyright(c) 2018 Intel Corporation. All rights reserved.
   7//
   8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
   9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
  10//	    Rander Wang <rander.wang@intel.com>
  11//          Keyon Jie <yang.jie@linux.intel.com>
  12//
  13
  14/*
  15 * Hardware interface for generic Intel audio DSP HDA IP
  16 */
  17
  18#include <linux/module.h>
  19#include <sound/hdaudio_ext.h>
  20#include <sound/hda_register.h>
  21#include <sound/hda-mlink.h>
  22#include <trace/events/sof_intel.h>
  23#include "../sof-audio.h"
  24#include "../ops.h"
  25#include "hda.h"
  26#include "hda-ipc.h"
  27
  28static bool hda_enable_trace_D0I3_S0;
  29#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
  30module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
  31MODULE_PARM_DESC(enable_trace_D0I3_S0,
  32		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
  33#endif
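/*
 * Example (assumption: this file is built into the snd_sof_intel_hda_common
 * module): tracing while the DSP sits in D0I3 during S0 can be requested at
 * boot with a kernel command line entry such as
 *	snd_sof_intel_hda_common.enable_trace_D0I3_S0=1
 * The parameter is read-only at runtime (permissions 0444).
 */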
  34
  35/*
  36 * DSP Core control.
  37 */
  38
  39static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
  40{
  41	u32 adspcs;
  42	u32 reset;
  43	int ret;
  44
  45	/* set reset bits for cores */
  46	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  47	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  48					 HDA_DSP_REG_ADSPCS,
  49					 reset, reset);
  50
  51	/* poll with timeout to check if operation successful */
  52	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  53					HDA_DSP_REG_ADSPCS, adspcs,
  54					((adspcs & reset) == reset),
  55					HDA_DSP_REG_POLL_INTERVAL_US,
  56					HDA_DSP_RESET_TIMEOUT_US);
  57	if (ret < 0) {
  58		dev_err(sdev->dev,
  59			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
  60			__func__);
  61		return ret;
  62	}
  63
  64	/* has core entered reset ? */
  65	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
  66				  HDA_DSP_REG_ADSPCS);
  67	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
  68		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
  69		dev_err(sdev->dev,
  70			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
  71			core_mask, adspcs);
  72		ret = -EIO;
  73	}
  74
  75	return ret;
  76}
  77
  78static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
  79{
  80	unsigned int crst;
  81	u32 adspcs;
  82	int ret;
  83
  84	/* clear reset bits for cores */
  85	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  86					 HDA_DSP_REG_ADSPCS,
  87					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
  88					 0);
  89
  90	/* poll with timeout to check if operation successful */
  91	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  92	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  93					    HDA_DSP_REG_ADSPCS, adspcs,
  94					    !(adspcs & crst),
  95					    HDA_DSP_REG_POLL_INTERVAL_US,
  96					    HDA_DSP_RESET_TIMEOUT_US);
  97
  98	if (ret < 0) {
  99		dev_err(sdev->dev,
 100			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 101			__func__);
 102		return ret;
 103	}
 104
 105	/* has core left reset ? */
 106	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 107				  HDA_DSP_REG_ADSPCS);
 108	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
 109		dev_err(sdev->dev,
 110			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
 111			core_mask, adspcs);
 112		ret = -EIO;
 113	}
 114
 115	return ret;
 116}
 117
 118int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
 119{
 120	/* stall core */
 121	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 122					 HDA_DSP_REG_ADSPCS,
 123					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 124					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 125
 126	/* set reset state */
 127	return hda_dsp_core_reset_enter(sdev, core_mask);
 128}
 129
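/*
 * A core is reported as enabled only when both of its power bits are set in
 * ADSPCS (SPA requested by the driver, CPA confirmed by the hardware) and it
 * is neither held in reset (CRST) nor stalled (CSTALL).
 */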
 130bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
 131{
 132	int val;
 133	bool is_enable;
 134
 135	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 136
 137#define MASK_IS_EQUAL(v, m, field) ({	\
 138	u32 _m = field(m);		\
 139	((v) & _m) == _m;		\
 140})
 141
 142	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
 143		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
 144		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
 145		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 146
 147#undef MASK_IS_EQUAL
 148
 149	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
 150		is_enable, core_mask);
 151
 152	return is_enable;
 153}
 154
 155int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
 156{
 157	int ret;
 158
 159	/* leave reset state */
 160	ret = hda_dsp_core_reset_leave(sdev, core_mask);
 161	if (ret < 0)
 162		return ret;
 163
 164	/* run core */
 165	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
 166	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 167					 HDA_DSP_REG_ADSPCS,
 168					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 169					 0);
 170
 171	/* is core now running ? */
 172	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
 173		hda_dsp_core_stall_reset(sdev, core_mask);
 174		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
 175			core_mask);
 176		ret = -EIO;
 177	}
 178
 179	return ret;
 180}
 181
 182/*
 183 * Power Management.
 184 */
 185
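/*
 * Power-up handshake: set the Set-Power-Active (SPA) bits for the requested
 * cores, then poll ADSPCS until the hardware mirrors them in the
 * Current-Power-Active (CPA) bits or the reset timeout expires.
 */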
 186int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
 187{
 188	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 189	const struct sof_intel_dsp_desc *chip = hda->desc;
 190	unsigned int cpa;
 191	u32 adspcs;
 192	int ret;
 193
 194	/* restrict core_mask to host managed cores mask */
 195	core_mask &= chip->host_managed_cores_mask;
 196	/* return if core_mask is not valid */
 197	if (!core_mask)
 198		return 0;
 199
 200	/* update bits */
 201	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
 202				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
 203				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
 204
 205	/* poll with timeout to check if operation successful */
 206	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
 207	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 208					    HDA_DSP_REG_ADSPCS, adspcs,
 209					    (adspcs & cpa) == cpa,
 210					    HDA_DSP_REG_POLL_INTERVAL_US,
 211					    HDA_DSP_RESET_TIMEOUT_US);
 212	if (ret < 0) {
 213		dev_err(sdev->dev,
 214			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 215			__func__);
 216		return ret;
 217	}
 218
 219	/* did core power up ? */
 220	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 221				  HDA_DSP_REG_ADSPCS);
 222	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
 223		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
 224		dev_err(sdev->dev,
 225			"error: power up core failed core_mask %x adspcs 0x%x\n",
 226			core_mask, adspcs);
 227		ret = -EIO;
 228	}
 229
 230	return ret;
 231}
 232
 233static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
 234{
 235	u32 adspcs;
 236	int ret;
 237
 238	/* update bits */
 239	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 240					 HDA_DSP_REG_ADSPCS,
 241					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
 242
 243	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 244				HDA_DSP_REG_ADSPCS, adspcs,
 245				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
 246				HDA_DSP_REG_POLL_INTERVAL_US,
 247				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
 248	if (ret < 0)
 249		dev_err(sdev->dev,
 250			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 251			__func__);
 252
 253	return ret;
 254}
 255
 256int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
 257{
 258	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 259	const struct sof_intel_dsp_desc *chip = hda->desc;
 260	int ret;
 261
 262	/* restrict core_mask to host managed cores mask */
 263	core_mask &= chip->host_managed_cores_mask;
 264
 265	/* return if core_mask is not valid or cores are already enabled */
 266	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
 267		return 0;
 268
 269	/* power up */
 270	ret = hda_dsp_core_power_up(sdev, core_mask);
 271	if (ret < 0) {
 272		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
 273			core_mask);
 274		return ret;
 275	}
 276
 277	return hda_dsp_core_run(sdev, core_mask);
 278}
 279
 280int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
 281				  unsigned int core_mask)
 282{
 283	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 284	const struct sof_intel_dsp_desc *chip = hda->desc;
 285	int ret;
 286
 287	/* restrict core_mask to host managed cores mask */
 288	core_mask &= chip->host_managed_cores_mask;
 289
 290	/* return if core_mask is not valid */
 291	if (!core_mask)
 292		return 0;
 293
 294	/* place core in reset prior to power down */
 295	ret = hda_dsp_core_stall_reset(sdev, core_mask);
 296	if (ret < 0) {
 297		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
 298			core_mask);
 299		return ret;
 300	}
 301
 302	/* power down core */
 303	ret = hda_dsp_core_power_down(sdev, core_mask);
 304	if (ret < 0) {
 305		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
 306			core_mask, ret);
 307		return ret;
 308	}
 309
 310	/* make sure we are in OFF state */
 311	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
 312		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
 313			core_mask, ret);
 314		ret = -EIO;
 315	}
 316
 317	return ret;
 318}
 319
 320void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
 321{
 322	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 323	const struct sof_intel_dsp_desc *chip = hda->desc;
 324
 325	if (sdev->dspless_mode_selected)
 326		return;
 327
 328	/* enable IPC DONE and BUSY interrupts */
 329	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 330			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
 331			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
 332
 333	/* enable IPC interrupt */
 334	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 335				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
 336}
 337
 338void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
 339{
 340	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 341	const struct sof_intel_dsp_desc *chip = hda->desc;
 342
 343	if (sdev->dspless_mode_selected)
 344		return;
 345
 346	/* disable IPC interrupt */
 347	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 348				HDA_DSP_ADSPIC_IPC, 0);
 349
 350	/* disable IPC BUSY and DONE interrupt */
 351	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 352			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
 353}
 354
 355static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
 356{
 357	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
 358	struct snd_sof_pdata *pdata = sdev->pdata;
 359	const struct sof_intel_dsp_desc *chip;
 360
 361	chip = get_chip_info(pdata);
 362	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
 363		SOF_HDA_VS_D0I3C_CIP) {
 364		if (!retry--)
 365			return -ETIMEDOUT;
 366		usleep_range(10, 15);
 367	}
 368
 369	return 0;
 370}
 371
 372static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
 373{
 374	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
 375
 376	if (pm_ops && pm_ops->set_pm_gate)
 377		return pm_ops->set_pm_gate(sdev, flags);
 378
 379	return 0;
 380}
 381
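/*
 * D0I3C update sequence: wait for the Command-In-Progress (CIP) bit to clear,
 * write the new I3 value, allow a short settling delay, wait for CIP to clear
 * again, then read the register back to confirm the I3 bit actually changed.
 */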
 382static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
 383{
 384	struct snd_sof_pdata *pdata = sdev->pdata;
 385	const struct sof_intel_dsp_desc *chip;
 386	int ret;
 387	u8 reg;
 388
 389	chip = get_chip_info(pdata);
 390
 391	/* Write to D0I3C after Command-In-Progress bit is cleared */
 392	ret = hda_dsp_wait_d0i3c_done(sdev);
 393	if (ret < 0) {
 394		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
 395		return ret;
 396	}
 397
 398	/* Update D0I3C register */
 399	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
 400			    SOF_HDA_VS_D0I3C_I3, value);
 401
 402	/*
 403	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
 404	 * A delay is recommended before checking if D0I3C::CIP is cleared
 405	 */
 406	usleep_range(30, 40);
 407
 408	/* Wait for cmd in progress to be cleared before exiting the function */
 409	ret = hda_dsp_wait_d0i3c_done(sdev);
 410	if (ret < 0) {
 411		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
 412		return ret;
 413	}
 414
 415	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
 416	/* Confirm d0i3 state changed with paranoia check */
 417	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
 418		dev_err(sdev->dev, "failed to update D0I3C!\n");
 419		return -EIO;
 420	}
 421
 422	trace_sof_intel_D0I3C_updated(sdev, reg);
 423
 424	return 0;
 425}
 426
 427/*
 428 * d0i3 streaming is enabled if all the active streams can
 429 * work in d0i3 state and playback is enabled
 430 */
 431static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
 432{
 433	struct snd_pcm_substream *substream;
 434	struct snd_sof_pcm *spcm;
 435	bool playback_active = false;
 436	int dir;
 437
 438	list_for_each_entry(spcm, &sdev->pcm_list, list) {
 439		for_each_pcm_streams(dir) {
 440			substream = spcm->stream[dir].substream;
 441			if (!substream || !substream->runtime)
 442				continue;
 443
 444			if (!spcm->stream[dir].d0i3_compatible)
 445				return false;
 446
 447			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
 448				playback_active = true;
 449		}
 450	}
 451
 452	return playback_active;
 453}
 454
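/*
 * PM_GATE flags selected for the D0 substates:
 *  - D0I3: HDA_PM_NO_DMA_TRACE unless trace is deliberately kept running for
 *    S0 debugging, plus HDA_PM_PG_STREAMING when every active stream is
 *    D0I3-compatible and playback is in progress.
 *  - D0I0: HDA_PM_PPG to prevent power gating.
 */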
 455static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
 456				const struct sof_dsp_power_state *target_state)
 457{
 458	u32 flags = 0;
 459	int ret;
 460	u8 value = 0;
 461
 462	/*
 463	 * Sanity check for illegal state transitions
 464	 * The only allowed transitions are:
 465	 * 1. D3 -> D0I0
 466	 * 2. D0I0 -> D0I3
 467	 * 3. D0I3 -> D0I0
 468	 */
 469	switch (sdev->dsp_power_state.state) {
 470	case SOF_DSP_PM_D0:
 471		/* Follow the sequence below for D0 substate transitions */
 472		break;
 473	case SOF_DSP_PM_D3:
 474		/* Follow regular flow for D3 -> D0 transition */
 475		return 0;
 476	default:
 477		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
 478			sdev->dsp_power_state.state, target_state->state);
 479		return -EINVAL;
 480	}
 481
 482	/* Set flags and register value for D0 target substate */
 483	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
 484		value = SOF_HDA_VS_D0I3C_I3;
 485
 486		/*
 487		 * Trace DMA needs to be disabled when the DSP enters
 488		 * D0I3 for S0Ix suspend, but it can be kept enabled
 489		 * when the DSP enters D0I3 while the system is in S0
 490		 * for debugging purposes.
 491		 */
 492		if (!sdev->fw_trace_is_supported ||
 493		    !hda_enable_trace_D0I3_S0 ||
 494		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
 495			flags = HDA_PM_NO_DMA_TRACE;
 496
 497		if (hda_dsp_d0i3_streaming_applicable(sdev))
 498			flags |= HDA_PM_PG_STREAMING;
 499	} else {
 500		/* prevent power gating in D0I0 */
 501		flags = HDA_PM_PPG;
 502	}
 503
 504	/* update D0I3C register */
 505	ret = hda_dsp_update_d0i3c_register(sdev, value);
 506	if (ret < 0)
 507		return ret;
 508
 509	/*
 510	 * Notify the DSP of the state change.
 511	 * If this IPC fails, revert the D0I3C register update in order
 512	 * to prevent partial state change.
 513	 */
 514	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
 515	if (ret < 0) {
 516		dev_err(sdev->dev,
 517			"error: PM_GATE ipc error %d\n", ret);
 518		goto revert;
 519	}
 520
 521	return ret;
 522
 523revert:
 524	/* fallback to the previous register value */
 525	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
 526
 527	/*
 528	 * This can fail but return the IPC error to signal that
 529	 * the state change failed.
 530	 */
 531	hda_dsp_update_d0i3c_register(sdev, value);
 532
 533	return ret;
 534}
 535
 536/* helper to log DSP state */
 537static void hda_dsp_state_log(struct snd_sof_dev *sdev)
 538{
 539	switch (sdev->dsp_power_state.state) {
 540	case SOF_DSP_PM_D0:
 541		switch (sdev->dsp_power_state.substate) {
 542		case SOF_HDA_DSP_PM_D0I0:
 543			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
 544			break;
 545		case SOF_HDA_DSP_PM_D0I3:
 546			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
 547			break;
 548		default:
 549			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
 550				sdev->dsp_power_state.substate);
 551			break;
 552		}
 553		break;
 554	case SOF_DSP_PM_D1:
 555		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
 556		break;
 557	case SOF_DSP_PM_D2:
 558		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
 559		break;
 560	case SOF_DSP_PM_D3:
 561		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
 562		break;
 563	default:
 564		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
 565			sdev->dsp_power_state.state);
 566		break;
 567	}
 568}
 569
 570/*
 571 * All DSP power state transitions are initiated by the driver.
 572 * If the requested state change fails, the error is simply returned.
 573 * Further state transitions are attempted only when the set_power_state() op
 574 * is called again either because of a new IPC sent to the DSP or
 575 * during system suspend/resume.
 576 */
 577static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
 578				   const struct sof_dsp_power_state *target_state)
 579{
 580	int ret = 0;
 581
 582	switch (target_state->state) {
 583	case SOF_DSP_PM_D0:
 584		ret = hda_dsp_set_D0_state(sdev, target_state);
 585		break;
 586	case SOF_DSP_PM_D3:
 587		/* The only allowed transition is: D0I0 -> D3 */
 588		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
 589		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
 590			break;
 591
 592		dev_err(sdev->dev,
 593			"error: transition from %d to %d not allowed\n",
 594			sdev->dsp_power_state.state, target_state->state);
 595		return -EINVAL;
 596	default:
 597		dev_err(sdev->dev, "error: target state unsupported %d\n",
 598			target_state->state);
 599		return -EINVAL;
 600	}
 601	if (ret < 0) {
 602		dev_err(sdev->dev,
 603			"failed to set requested target DSP state %d substate %d\n",
 604			target_state->state, target_state->substate);
 605		return ret;
 606	}
 607
 608	sdev->dsp_power_state = *target_state;
 609	hda_dsp_state_log(sdev);
 610	return ret;
 611}
 612
 613int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
 614				 const struct sof_dsp_power_state *target_state)
 615{
 616	/*
 617	 * When the DSP is already in D0I3 and the target state is D0I3,
 618	 * it could be the case that the DSP is in D0I3 during S0
 619	 * and the system is suspending to S0Ix. Therefore,
 620	 * hda_dsp_set_D0_state() must be called to disable trace DMA
 621	 * by sending the PM_GATE IPC to the FW.
 622	 */
 623	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
 624	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
 625		return hda_dsp_set_power_state(sdev, target_state);
 626
 627	/*
 628	 * For all other cases, return without doing anything if
 629	 * the DSP is already in the target state.
 630	 */
 631	if (target_state->state == sdev->dsp_power_state.state &&
 632	    target_state->substate == sdev->dsp_power_state.substate)
 633		return 0;
 634
 635	return hda_dsp_set_power_state(sdev, target_state);
 636}
 637
 638int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
 639				 const struct sof_dsp_power_state *target_state)
 640{
 641	/* Return without doing anything if the DSP is already in the target state */
 642	if (target_state->state == sdev->dsp_power_state.state &&
 643	    target_state->substate == sdev->dsp_power_state.substate)
 644		return 0;
 645
 646	return hda_dsp_set_power_state(sdev, target_state);
 647}
 648
 649/*
 650 * Audio DSP states may transform as below:-
 651 *
 652 *                                         Opportunistic D0I3 in S0
 653 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 654 *     suspend    |                     +--------------------+
 655 *   +------------+       D0I0(active)  |                    |
 656 *   |            |                     <---------------+    |
 657 *   |   +-------->                     |    New IPC	|    |
 658 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
 659 *   |   |resume     |  |         |  |			|    |
 660 *   |   |           |  |         |  |			|    |
 661 *   |   |     System|  |         |  |			|    |
 662 *   |   |     resume|  | S3/S0IX |  |                  |    |
 663 *   |   |	     |  | suspend |  | S0IX             |    |
 664 *   |   |           |  |         |  |suspend           |    |
 665 *   |   |           |  |         |  |                  |    |
 666 *   |   |           |  |         |  |                  |    |
 667 * +-v---+-----------+--v-------+ |  |           +------+----v----+
 668 * |                            | |  +----------->                |
 669 * |       D3 (suspended)       | |              |      D0I3      |
 670 * |                            | +--------------+                |
 671 * |                            |  System resume |                |
 672 * +----------------------------+		 +----------------+
 673 *
 674 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 675 *		 ignored the suspend trigger. Otherwise the DSP
 676 *		 is in D3.
 677 */
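/*
 * The transitions in the diagram above are driven by hda_dsp_suspend() and
 * hda_dsp_resume() for system PM, hda_dsp_runtime_suspend() and
 * hda_dsp_runtime_resume() for runtime PM, and by the delayed
 * hda_dsp_d0i3_work() for the opportunistic D0I3 entry.
 */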
 678
 679static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
 680{
 681	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 682	const struct sof_intel_dsp_desc *chip = hda->desc;
 683	struct hdac_bus *bus = sof_to_bus(sdev);
 684	int ret, j;
 685
 686	/*
 687	 * The memory used for IMR boot loses its content in states deeper than S3.
 688	 * We must not try IMR boot on next power up (as it will fail).
 689	 *
 690	 * In case of firmware crash or boot failure, set skip_imr_boot to true
 691	 * as well in order to try to re-load the firmware and do a 'cold' boot.
 692	 */
 693	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
 694	    sdev->fw_state == SOF_FW_CRASHED ||
 695	    sdev->fw_state == SOF_FW_BOOT_FAILED)
 696		hda->skip_imr_boot = true;
 697
 698	ret = chip->disable_interrupts(sdev);
 699	if (ret < 0)
 700		return ret;
 701
 702	/* make sure that no irq handler is pending before shutdown */
 703	synchronize_irq(sdev->ipc_irq);
 704
 705	hda_codec_jack_wake_enable(sdev, runtime_suspend);
 706
 707	/* power down all hda links */
 708	hda_bus_ml_suspend(bus);
 709
 710	if (sdev->dspless_mode_selected)
 711		goto skip_dsp;
 712
 713	ret = chip->power_down_dsp(sdev);
 714	if (ret < 0) {
 715		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
 716		return ret;
 717	}
 718
 719	/* reset ref counts for all cores */
 720	for (j = 0; j < chip->cores_num; j++)
 721		sdev->dsp_core_ref_count[j] = 0;
 722
 723	/* disable ppcap interrupt */
 724	hda_dsp_ctrl_ppcap_enable(sdev, false);
 725	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
 726skip_dsp:
 727
 728	/* disable hda bus irq and streams */
 729	hda_dsp_ctrl_stop_chip(sdev);
 730
 731	/* disable LP retention mode */
 732	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
 733				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
 734
 735	/* reset controller */
 736	ret = hda_dsp_ctrl_link_reset(sdev, true);
 737	if (ret < 0) {
 738		dev_err(sdev->dev,
 739			"error: failed to reset controller during suspend\n");
 740		return ret;
 741	}
 742
 743	/* display codec can be powered off after link reset */
 744	hda_codec_i915_display_power(sdev, false);
 745
 746	return 0;
 747}
 748
 749static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 750{
 751	int ret;
 752
 753	/* display codec must be powered before link reset */
 754	hda_codec_i915_display_power(sdev, true);
 755
 756	/*
 757	 * clear TCSEL to clear playback on some HD Audio
 758	 * codecs. PCI TCSEL is defined in the Intel manuals.
 759	 */
 760	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
 761
 762	/* reset and start hda controller */
 763	ret = hda_dsp_ctrl_init_chip(sdev);
 764	if (ret < 0) {
 765		dev_err(sdev->dev,
 766			"error: failed to start controller after resume\n");
 767		goto cleanup;
 768	}
 769
 770	/* check jack status */
 771	if (runtime_resume) {
 772		hda_codec_jack_wake_enable(sdev, false);
 773		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
 774			hda_codec_jack_check(sdev);
 775	}
 776
 777	if (!sdev->dspless_mode_selected) {
 778		/* enable ppcap interrupt */
 779		hda_dsp_ctrl_ppcap_enable(sdev, true);
 780		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
 781	}
 782
 783cleanup:
 784	/* display codec can be powered off after controller init */
 785	hda_codec_i915_display_power(sdev, false);
 786
 787	return 0;
 788}
 789
 790int hda_dsp_resume(struct snd_sof_dev *sdev)
 791{
 792	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 793	struct hdac_bus *bus = sof_to_bus(sdev);
 794	struct pci_dev *pci = to_pci_dev(sdev->dev);
 795	const struct sof_dsp_power_state target_state = {
 796		.state = SOF_DSP_PM_D0,
 797		.substate = SOF_HDA_DSP_PM_D0I0,
 798	};
 799	int ret;
 800
 801	/* resume from D0I3 */
 802	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
 803		ret = hda_bus_ml_resume(bus);
 804		if (ret < 0) {
 805			dev_err(sdev->dev,
 806				"error %d in %s: failed to power up links\n",
 807				ret, __func__);
 808			return ret;
 809		}
 810
 811		/* set up CORB/RIRB buffers if they were in use before suspend */
 812		hda_codec_resume_cmd_io(sdev);
 813
 814		/* Set DSP power state */
 815		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
 816		if (ret < 0) {
 817			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
 818				target_state.state, target_state.substate);
 819			return ret;
 820		}
 821
 822		/* restore L1SEN bit */
 823		if (hda->l1_disabled)
 824			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
 825						HDA_VS_INTEL_EM2,
 826						HDA_VS_INTEL_EM2_L1SEN, 0);
 827
 828		/* restore and disable the system wakeup */
 829		pci_restore_state(pci);
 830		disable_irq_wake(pci->irq);
 831		return 0;
 832	}
 833
 834	/* init hda controller. DSP cores will be powered up during fw boot */
 835	ret = hda_resume(sdev, false);
 836	if (ret < 0)
 837		return ret;
 838
 839	return snd_sof_dsp_set_power_state(sdev, &target_state);
 840}
 841
 842int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
 843{
 844	const struct sof_dsp_power_state target_state = {
 845		.state = SOF_DSP_PM_D0,
 846	};
 847	int ret;
 848
 849	/* init hda controller. DSP cores will be powered up during fw boot */
 850	ret = hda_resume(sdev, true);
 851	if (ret < 0)
 852		return ret;
 853
 854	return snd_sof_dsp_set_power_state(sdev, &target_state);
 855}
 856
 857int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
 858{
 859	struct hdac_bus *hbus = sof_to_bus(sdev);
 860
 861	if (hbus->codec_powered) {
 862		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
 863			(unsigned int)hbus->codec_powered);
 864		return -EBUSY;
 865	}
 866
 867	return 0;
 868}
 869
 870int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
 871{
 872	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 873	const struct sof_dsp_power_state target_state = {
 874		.state = SOF_DSP_PM_D3,
 875	};
 876	int ret;
 877
 878	if (!sdev->dspless_mode_selected) {
 879		/* cancel any attempt for DSP D0I3 */
 880		cancel_delayed_work_sync(&hda->d0i3_work);
 881	}
 882
 883	/* stop hda controller and power dsp off */
 884	ret = hda_suspend(sdev, true);
 885	if (ret < 0)
 886		return ret;
 887
 888	return snd_sof_dsp_set_power_state(sdev, &target_state);
 889}
 890
 891int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
 892{
 893	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 894	struct hdac_bus *bus = sof_to_bus(sdev);
 895	struct pci_dev *pci = to_pci_dev(sdev->dev);
 896	const struct sof_dsp_power_state target_dsp_state = {
 897		.state = target_state,
 898		.substate = target_state == SOF_DSP_PM_D0 ?
 899				SOF_HDA_DSP_PM_D0I3 : 0,
 900	};
 901	int ret;
 902
 903	if (!sdev->dspless_mode_selected) {
 904		/* cancel any attempt for DSP D0I3 */
 905		cancel_delayed_work_sync(&hda->d0i3_work);
 906	}
 907
 908	if (target_state == SOF_DSP_PM_D0) {
 909		/* Set DSP power state */
 910		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 911		if (ret < 0) {
 912			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
 913				target_dsp_state.state,
 914				target_dsp_state.substate);
 915			return ret;
 916		}
 917
 918		/* enable L1SEN to make sure the system can enter S0Ix */
 919		if (hda->l1_disabled)
 920			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
 921						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
 922
 923		/* stop the CORB/RIRB DMA if it is On */
 924		hda_codec_suspend_cmd_io(sdev);
 925
 926		/* no link can be powered in s0ix state */
 927		ret = hda_bus_ml_suspend(bus);
 928		if (ret < 0) {
 929			dev_err(sdev->dev,
 930				"error %d in %s: failed to power down links\n",
 931				ret, __func__);
 932			return ret;
 933		}
 934
 935		/* enable the system waking up via IPC IRQ */
 936		enable_irq_wake(pci->irq);
 937		pci_save_state(pci);
 938		return 0;
 939	}
 940
 941	/* stop hda controller and power dsp off */
 942	ret = hda_suspend(sdev, false);
 943	if (ret < 0) {
 944		dev_err(bus->dev, "error: suspending dsp\n");
 945		return ret;
 946	}
 947
 948	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 949}
 950
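/*
 * Scan every stream on the HDA bus and report, as a bitmask of stream
 * indices, those whose DMA engine is still running; used at shutdown to
 * decide whether the DMA-flush recovery quirk below is needed.
 */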
 951static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
 952{
 953	struct hdac_bus *bus = sof_to_bus(sdev);
 954	struct hdac_stream *s;
 955	unsigned int active_streams = 0;
 956	int sd_offset;
 957	u32 val;
 958
 959	list_for_each_entry(s, &bus->stream_list, list) {
 960		sd_offset = SOF_STREAM_SD_OFFSET(s);
 961		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
 962				       sd_offset);
 963		if (val & SOF_HDA_SD_CTL_DMA_START)
 964			active_streams |= BIT(s->index);
 965	}
 966
 967	return active_streams;
 968}
 969
 970static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
 971{
 972	int ret;
 973
 974	/*
 975	 * Do not assume any particular timing between the prior
 976	 * suspend flow and the running of this quirk function.
 977	 * The delay below is needed in case the controller was just
 978	 * put into reset before this function was called.
 979	 */
 980	usleep_range(500, 1000);
 981
 982	/*
 983	 * Take controller out of reset to flush DMA
 984	 * transactions.
 985	 */
 986	ret = hda_dsp_ctrl_link_reset(sdev, false);
 987	if (ret < 0)
 988		return ret;
 989
 990	usleep_range(500, 1000);
 991
 992	/* Restore state for shutdown, back to reset */
 993	ret = hda_dsp_ctrl_link_reset(sdev, true);
 994	if (ret < 0)
 995		return ret;
 996
 997	return ret;
 998}
 999
1000int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
1001{
1002	unsigned int active_streams;
1003	int ret, ret2;
1004
1005	/* check if DMA cleanup has been successful */
1006	active_streams = hda_dsp_check_for_dma_streams(sdev);
1007
1008	sdev->system_suspend_target = SOF_SUSPEND_S3;
1009	ret = snd_sof_suspend(sdev->dev);
1010
1011	if (active_streams) {
1012		dev_warn(sdev->dev,
1013			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
1014			 active_streams);
1015		ret2 = hda_dsp_s5_quirk(sdev);
1016		if (ret2 < 0)
1017			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
1018	}
1019
1020	return ret;
1021}
1022
1023int hda_dsp_shutdown(struct snd_sof_dev *sdev)
1024{
1025	sdev->system_suspend_target = SOF_SUSPEND_S3;
1026	return snd_sof_suspend(sdev->dev);
1027}
1028
1029int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
1030{
1031	int ret;
1032
1033	/* make sure all DAI resources are freed */
1034	ret = hda_dsp_dais_suspend(sdev);
1035	if (ret < 0)
1036		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
1037
1038	return ret;
1039}
1040
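/*
 * Delayed work that opportunistically moves the DSP to D0I3. It only does so
 * when nothing but D0I3-compatible streams are active, and any failure is
 * merely logged because there is no caller to propagate the error to.
 */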
1041void hda_dsp_d0i3_work(struct work_struct *work)
1042{
1043	struct sof_intel_hda_dev *hdev = container_of(work,
1044						      struct sof_intel_hda_dev,
1045						      d0i3_work.work);
1046	struct hdac_bus *bus = &hdev->hbus.core;
1047	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
1048	struct sof_dsp_power_state target_state = {
1049		.state = SOF_DSP_PM_D0,
1050		.substate = SOF_HDA_DSP_PM_D0I3,
1051	};
1052	int ret;
1053
1054	/* DSP can enter D0I3 only if all active streams are D0I3-compatible */
1055	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
1056		/* remain in D0I0 */
1057		return;
1058
1059	/* This can fail but the error cannot be propagated */
1060	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
1061	if (ret < 0)
1062		dev_err_ratelimited(sdev->dev,
1063				    "error: failed to set DSP state %d substate %d\n",
1064				    target_state.state, target_state.substate);
1065}
1066
1067int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1068{
1069	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1070	int ret, ret1;
1071
1072	/* power up core */
1073	ret = hda_dsp_enable_core(sdev, BIT(core));
1074	if (ret < 0) {
1075		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1076			core, ret);
1077		return ret;
1078	}
1079
1080	/* No need to send IPC for primary core or if FW boot is not complete */
1081	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1082		return 0;
1083
1084	/* No need to continue if the set_core_state op is not available */
1085	if (!pm_ops->set_core_state)
1086		return 0;
1087
1088	/* Now notify DSP for secondary cores */
1089	ret = pm_ops->set_core_state(sdev, core, true);
1090	if (ret < 0) {
1091		dev_err(sdev->dev, "failed to enable secondary core '%d' with err: %d\n",
1092			core, ret);
1093		goto power_down;
1094	}
1095
1096	return ret;
1097
1098power_down:
1099	/* power down core if it is host managed and return the original error if this fails too */
1100	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1101	if (ret1 < 0)
1102		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1103
1104	return ret;
1105}
1106
1107int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
1108{
1109	hda_sdw_int_enable(sdev, false);
1110	hda_dsp_ipc_int_disable(sdev);
1111
1112	return 0;
1113}
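/*
 * Illustrative sketch (not part of the original file; field names assumed
 * from struct snd_sof_dsp_ops): the PM helpers above are not called directly
 * by the SOF core, they are wired into the platform descriptor, roughly:
 *
 *	static struct snd_sof_dsp_ops example_hda_ops = {
 *		.suspend		= hda_dsp_suspend,
 *		.resume			= hda_dsp_resume,
 *		.runtime_suspend	= hda_dsp_runtime_suspend,
 *		.runtime_resume		= hda_dsp_runtime_resume,
 *		.runtime_idle		= hda_dsp_runtime_idle,
 *		.set_power_state	= hda_dsp_set_power_state_ipc3,
 *		.core_get		= hda_dsp_core_get,
 *	};
 */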
v5.14.15
  1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
  2//
  3// This file is provided under a dual BSD/GPLv2 license.  When using or
  4// redistributing this file, you may do so under either license.
  5//
  6// Copyright(c) 2018 Intel Corporation. All rights reserved.
  7//
  8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
  9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
 10//	    Rander Wang <rander.wang@intel.com>
 11//          Keyon Jie <yang.jie@linux.intel.com>
 12//
 13
 14/*
 15 * Hardware interface for generic Intel audio DSP HDA IP
 16 */
 17
 18#include <linux/module.h>
 19#include <sound/hdaudio_ext.h>
 20#include <sound/hda_register.h>
 
 
 21#include "../sof-audio.h"
 22#include "../ops.h"
 23#include "hda.h"
 24#include "hda-ipc.h"
 25
 26static bool hda_enable_trace_D0I3_S0;
 27#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
 28module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
 29MODULE_PARM_DESC(enable_trace_D0I3_S0,
 30		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
 31#endif
 32
 33/*
 34 * DSP Core control.
 35 */
 36
 37int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
 38{
 39	u32 adspcs;
 40	u32 reset;
 41	int ret;
 42
 43	/* set reset bits for cores */
 44	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
 45	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 46					 HDA_DSP_REG_ADSPCS,
 47					 reset, reset);
 48
 49	/* poll with timeout to check if operation successful */
 50	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 51					HDA_DSP_REG_ADSPCS, adspcs,
 52					((adspcs & reset) == reset),
 53					HDA_DSP_REG_POLL_INTERVAL_US,
 54					HDA_DSP_RESET_TIMEOUT_US);
 55	if (ret < 0) {
 56		dev_err(sdev->dev,
 57			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 58			__func__);
 59		return ret;
 60	}
 61
 62	/* has core entered reset ? */
 63	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 64				  HDA_DSP_REG_ADSPCS);
 65	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
 66		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
 67		dev_err(sdev->dev,
 68			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
 69			core_mask, adspcs);
 70		ret = -EIO;
 71	}
 72
 73	return ret;
 74}
 75
 76int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
 77{
 78	unsigned int crst;
 79	u32 adspcs;
 80	int ret;
 81
 82	/* clear reset bits for cores */
 83	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 84					 HDA_DSP_REG_ADSPCS,
 85					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
 86					 0);
 87
 88	/* poll with timeout to check if operation successful */
 89	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
 90	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 91					    HDA_DSP_REG_ADSPCS, adspcs,
 92					    !(adspcs & crst),
 93					    HDA_DSP_REG_POLL_INTERVAL_US,
 94					    HDA_DSP_RESET_TIMEOUT_US);
 95
 96	if (ret < 0) {
 97		dev_err(sdev->dev,
 98			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 99			__func__);
100		return ret;
101	}
102
103	/* has core left reset ? */
104	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
105				  HDA_DSP_REG_ADSPCS);
106	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
107		dev_err(sdev->dev,
108			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
109			core_mask, adspcs);
110		ret = -EIO;
111	}
112
113	return ret;
114}
115
116int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
117{
118	/* stall core */
119	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
120					 HDA_DSP_REG_ADSPCS,
121					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
122					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
123
124	/* set reset state */
125	return hda_dsp_core_reset_enter(sdev, core_mask);
126}
127
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
129{
130	int ret;
131
132	/* leave reset state */
133	ret = hda_dsp_core_reset_leave(sdev, core_mask);
134	if (ret < 0)
135		return ret;
136
137	/* run core */
138	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
139	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
140					 HDA_DSP_REG_ADSPCS,
141					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
142					 0);
143
144	/* is core now running ? */
145	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
146		hda_dsp_core_stall_reset(sdev, core_mask);
147		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
148			core_mask);
149		ret = -EIO;
150	}
151
152	return ret;
153}
154
155/*
156 * Power Management.
157 */
158
159int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
160{
 
 
161	unsigned int cpa;
162	u32 adspcs;
163	int ret;
164
 
 
 
 
 
 
165	/* update bits */
166	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
167				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
168				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
169
170	/* poll with timeout to check if operation successful */
171	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
172	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
173					    HDA_DSP_REG_ADSPCS, adspcs,
174					    (adspcs & cpa) == cpa,
175					    HDA_DSP_REG_POLL_INTERVAL_US,
176					    HDA_DSP_RESET_TIMEOUT_US);
177	if (ret < 0) {
178		dev_err(sdev->dev,
179			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
180			__func__);
181		return ret;
182	}
183
184	/* did core power up ? */
185	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
186				  HDA_DSP_REG_ADSPCS);
187	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
188		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
189		dev_err(sdev->dev,
190			"error: power up core failed core_mask %xadspcs 0x%x\n",
191			core_mask, adspcs);
192		ret = -EIO;
193	}
194
195	return ret;
196}
197
198int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
199{
200	u32 adspcs;
201	int ret;
202
203	/* update bits */
204	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
205					 HDA_DSP_REG_ADSPCS,
206					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
207
208	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
209				HDA_DSP_REG_ADSPCS, adspcs,
210				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
211				HDA_DSP_REG_POLL_INTERVAL_US,
212				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
213	if (ret < 0)
214		dev_err(sdev->dev,
215			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
216			__func__);
217
218	return ret;
219}
220
221bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
222			     unsigned int core_mask)
223{
224	int val;
225	bool is_enable;
226
227	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
228
229#define MASK_IS_EQUAL(v, m, field) ({	\
230	u32 _m = field(m);		\
231	((v) & _m) == _m;		\
232})
233
234	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
235		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
236		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
237		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
238
239#undef MASK_IS_EQUAL
240
241	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
242		is_enable, core_mask);
243
244	return is_enable;
245}
246
247int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
248{
249	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
250	const struct sof_intel_dsp_desc *chip = hda->desc;
251	int ret;
252
253	/* restrict core_mask to host managed cores mask */
254	core_mask &= chip->host_managed_cores_mask;
255
256	/* return if core_mask is not valid or cores are already enabled */
257	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
258		return 0;
259
260	/* power up */
261	ret = hda_dsp_core_power_up(sdev, core_mask);
262	if (ret < 0) {
263		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
264			core_mask);
265		return ret;
266	}
267
268	return hda_dsp_core_run(sdev, core_mask);
269}
270
271int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
272				  unsigned int core_mask)
273{
274	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
275	const struct sof_intel_dsp_desc *chip = hda->desc;
276	int ret;
277
278	/* restrict core_mask to host managed cores mask */
279	core_mask &= chip->host_managed_cores_mask;
280
281	/* return if core_mask is not valid */
282	if (!core_mask)
283		return 0;
284
285	/* place core in reset prior to power down */
286	ret = hda_dsp_core_stall_reset(sdev, core_mask);
287	if (ret < 0) {
288		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
289			core_mask);
290		return ret;
291	}
292
293	/* power down core */
294	ret = hda_dsp_core_power_down(sdev, core_mask);
295	if (ret < 0) {
296		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
297			core_mask, ret);
298		return ret;
299	}
300
301	/* make sure we are in OFF state */
302	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
303		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
304			core_mask, ret);
305		ret = -EIO;
306	}
307
308	return ret;
309}
310
311void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
312{
313	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
314	const struct sof_intel_dsp_desc *chip = hda->desc;
315
 
 
 
316	/* enable IPC DONE and BUSY interrupts */
317	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
318			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
319			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
320
321	/* enable IPC interrupt */
322	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
323				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
324}
325
326void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
327{
328	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
329	const struct sof_intel_dsp_desc *chip = hda->desc;
330
 
 
 
331	/* disable IPC interrupt */
332	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
333				HDA_DSP_ADSPIC_IPC, 0);
334
335	/* disable IPC BUSY and DONE interrupt */
336	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
337			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
338}
339
340static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
341{
342	struct hdac_bus *bus = sof_to_bus(sdev);
343	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
 
 
344
345	while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
 
 
346		if (!retry--)
347			return -ETIMEDOUT;
348		usleep_range(10, 15);
349	}
350
351	return 0;
352}
353
354static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
355{
356	struct sof_ipc_pm_gate pm_gate;
357	struct sof_ipc_reply reply;
358
359	memset(&pm_gate, 0, sizeof(pm_gate));
 
360
361	/* configure pm_gate ipc message */
362	pm_gate.hdr.size = sizeof(pm_gate);
363	pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
364	pm_gate.flags = flags;
365
366	/* send pm_gate ipc to dsp */
367	return sof_ipc_tx_message_no_pm(sdev->ipc, pm_gate.hdr.cmd,
368					&pm_gate, sizeof(pm_gate), &reply,
369					sizeof(reply));
370}
371
372static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
373{
374	struct hdac_bus *bus = sof_to_bus(sdev);
 
375	int ret;
 
 
 
376
377	/* Write to D0I3C after Command-In-Progress bit is cleared */
378	ret = hda_dsp_wait_d0i3c_done(sdev);
379	if (ret < 0) {
380		dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
381		return ret;
382	}
383
384	/* Update D0I3C register */
385	snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);
 
 
 
 
 
 
 
386
387	/* Wait for cmd in progress to be cleared before exiting the function */
388	ret = hda_dsp_wait_d0i3c_done(sdev);
389	if (ret < 0) {
390		dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
391		return ret;
392	}
393
394	dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
395		 snd_hdac_chip_readb(bus, VS_D0I3C));
 
 
 
 
 
 
396
397	return 0;
398}
399
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
400static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
401				const struct sof_dsp_power_state *target_state)
402{
403	u32 flags = 0;
404	int ret;
405	u8 value = 0;
406
407	/*
408	 * Sanity check for illegal state transitions
409	 * The only allowed transitions are:
410	 * 1. D3 -> D0I0
411	 * 2. D0I0 -> D0I3
412	 * 3. D0I3 -> D0I0
413	 */
414	switch (sdev->dsp_power_state.state) {
415	case SOF_DSP_PM_D0:
416		/* Follow the sequence below for D0 substate transitions */
417		break;
418	case SOF_DSP_PM_D3:
419		/* Follow regular flow for D3 -> D0 transition */
420		return 0;
421	default:
422		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
423			sdev->dsp_power_state.state, target_state->state);
424		return -EINVAL;
425	}
426
427	/* Set flags and register value for D0 target substate */
428	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
429		value = SOF_HDA_VS_D0I3C_I3;
430
431		/*
432		 * Trace DMA need to be disabled when the DSP enters
433		 * D0I3 for S0Ix suspend, but it can be kept enabled
434		 * when the DSP enters D0I3 while the system is in S0
435		 * for debug purpose.
436		 */
437		if (!sdev->dtrace_is_supported ||
438		    !hda_enable_trace_D0I3_S0 ||
439		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
440			flags = HDA_PM_NO_DMA_TRACE;
 
 
 
441	} else {
442		/* prevent power gating in D0I0 */
443		flags = HDA_PM_PPG;
444	}
445
446	/* update D0I3C register */
447	ret = hda_dsp_update_d0i3c_register(sdev, value);
448	if (ret < 0)
449		return ret;
450
451	/*
452	 * Notify the DSP of the state change.
453	 * If this IPC fails, revert the D0I3C register update in order
454	 * to prevent partial state change.
455	 */
456	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
457	if (ret < 0) {
458		dev_err(sdev->dev,
459			"error: PM_GATE ipc error %d\n", ret);
460		goto revert;
461	}
462
463	return ret;
464
465revert:
466	/* fallback to the previous register value */
467	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
468
469	/*
470	 * This can fail but return the IPC error to signal that
471	 * the state change failed.
472	 */
473	hda_dsp_update_d0i3c_register(sdev, value);
474
475	return ret;
476}
477
478/* helper to log DSP state */
479static void hda_dsp_state_log(struct snd_sof_dev *sdev)
480{
481	switch (sdev->dsp_power_state.state) {
482	case SOF_DSP_PM_D0:
483		switch (sdev->dsp_power_state.substate) {
484		case SOF_HDA_DSP_PM_D0I0:
485			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
486			break;
487		case SOF_HDA_DSP_PM_D0I3:
488			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
489			break;
490		default:
491			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
492				sdev->dsp_power_state.substate);
493			break;
494		}
495		break;
496	case SOF_DSP_PM_D1:
497		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
498		break;
499	case SOF_DSP_PM_D2:
500		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
501		break;
502	case SOF_DSP_PM_D3_HOT:
503		dev_dbg(sdev->dev, "Current DSP power state: D3_HOT\n");
504		break;
505	case SOF_DSP_PM_D3:
506		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
507		break;
508	case SOF_DSP_PM_D3_COLD:
509		dev_dbg(sdev->dev, "Current DSP power state: D3_COLD\n");
510		break;
511	default:
512		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
513			sdev->dsp_power_state.state);
514		break;
515	}
516}
517
518/*
519 * All DSP power state transitions are initiated by the driver.
520 * If the requested state change fails, the error is simply returned.
521 * Further state transitions are attempted only when the set_power_save() op
522 * is called again either because of a new IPC sent to the DSP or
523 * during system suspend/resume.
524 */
525int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
526			    const struct sof_dsp_power_state *target_state)
527{
528	int ret = 0;
529
530	/*
531	 * When the DSP is already in D0I3 and the target state is D0I3,
532	 * it could be the case that the DSP is in D0I3 during S0
533	 * and the system is suspending to S0Ix. Therefore,
534	 * hda_dsp_set_D0_state() must be called to disable trace DMA
535	 * by sending the PM_GATE IPC to the FW.
536	 */
537	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
538	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
539		goto set_state;
540
541	/*
542	 * For all other cases, return without doing anything if
543	 * the DSP is already in the target state.
544	 */
545	if (target_state->state == sdev->dsp_power_state.state &&
546	    target_state->substate == sdev->dsp_power_state.substate)
547		return 0;
548
549set_state:
550	switch (target_state->state) {
551	case SOF_DSP_PM_D0:
552		ret = hda_dsp_set_D0_state(sdev, target_state);
553		break;
554	case SOF_DSP_PM_D3:
555		/* The only allowed transition is: D0I0 -> D3 */
556		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
557		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
558			break;
559
560		dev_err(sdev->dev,
561			"error: transition from %d to %d not allowed\n",
562			sdev->dsp_power_state.state, target_state->state);
563		return -EINVAL;
564	default:
565		dev_err(sdev->dev, "error: target state unsupported %d\n",
566			target_state->state);
567		return -EINVAL;
568	}
569	if (ret < 0) {
570		dev_err(sdev->dev,
571			"failed to set requested target DSP state %d substate %d\n",
572			target_state->state, target_state->substate);
573		return ret;
574	}
575
576	sdev->dsp_power_state = *target_state;
577	hda_dsp_state_log(sdev);
578	return ret;
579}
580
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
581/*
582 * Audio DSP states may transform as below:-
583 *
584 *                                         Opportunistic D0I3 in S0
585 *     Runtime    +---------------------+  Delayed D0i3 work timeout
586 *     suspend    |                     +--------------------+
587 *   +------------+       D0I0(active)  |                    |
588 *   |            |                     <---------------+    |
589 *   |   +-------->                     |    New IPC	|    |
590 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
591 *   |   |resume     |  |         |  |			|    |
592 *   |   |           |  |         |  |			|    |
593 *   |   |     System|  |         |  |			|    |
594 *   |   |     resume|  | S3/S0IX |  |                  |    |
595 *   |   |	     |  | suspend |  | S0IX             |    |
596 *   |   |           |  |         |  |suspend           |    |
597 *   |   |           |  |         |  |                  |    |
598 *   |   |           |  |         |  |                  |    |
599 * +-v---+-----------+--v-------+ |  |           +------+----v----+
600 * |                            | |  +----------->                |
601 * |       D3 (suspended)       | |              |      D0I3      |
602 * |                            | +--------------+                |
603 * |                            |  System resume |                |
604 * +----------------------------+		 +----------------+
605 *
606 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
607 *		 ignored the suspend trigger. Otherwise the DSP
608 *		 is in D3.
609 */
610
611static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
612{
613	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
614	const struct sof_intel_dsp_desc *chip = hda->desc;
615#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
616	struct hdac_bus *bus = sof_to_bus(sdev);
617#endif
618	int ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
619
620	hda_sdw_int_enable(sdev, false);
 
621
622	/* disable IPC interrupts */
623	hda_dsp_ipc_int_disable(sdev);
624
625#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
626	if (runtime_suspend)
627		hda_codec_jack_wake_enable(sdev, true);
628
629	/* power down all hda link */
630	snd_hdac_ext_bus_link_power_down_all(bus);
631#endif
632
633	/* power down DSP */
634	ret = snd_sof_dsp_core_power_down(sdev, chip->host_managed_cores_mask);
635	if (ret < 0) {
636		dev_err(sdev->dev,
637			"error: failed to power down core during suspend\n");
638		return ret;
639	}
640
 
 
 
 
641	/* disable ppcap interrupt */
642	hda_dsp_ctrl_ppcap_enable(sdev, false);
643	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
 
644
645	/* disable hda bus irq and streams */
646	hda_dsp_ctrl_stop_chip(sdev);
647
648	/* disable LP retention mode */
649	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
650				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
651
652	/* reset controller */
653	ret = hda_dsp_ctrl_link_reset(sdev, true);
654	if (ret < 0) {
655		dev_err(sdev->dev,
656			"error: failed to reset controller during suspend\n");
657		return ret;
658	}
659
660	/* display codec can powered off after link reset */
661	hda_codec_i915_display_power(sdev, false);
662
663	return 0;
664}
665
666static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
667{
668#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
669	struct hdac_bus *bus = sof_to_bus(sdev);
670	struct hdac_ext_link *hlink = NULL;
671#endif
672	int ret;
673
674	/* display codec must be powered before link reset */
675	hda_codec_i915_display_power(sdev, true);
676
677	/*
678	 * clear TCSEL to clear playback on some HD Audio
679	 * codecs. PCI TCSEL is defined in the Intel manuals.
680	 */
681	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
682
683	/* reset and start hda controller */
684	ret = hda_dsp_ctrl_init_chip(sdev, true);
685	if (ret < 0) {
686		dev_err(sdev->dev,
687			"error: failed to start controller after resume\n");
688		goto cleanup;
689	}
690
691#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
692	/* check jack status */
693	if (runtime_resume) {
694		hda_codec_jack_wake_enable(sdev, false);
695		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
696			hda_codec_jack_check(sdev);
697	}
698
699	/* turn off the links that were off before suspend */
700	list_for_each_entry(hlink, &bus->hlink_list, list) {
701		if (!hlink->ref_count)
702			snd_hdac_ext_bus_link_power_down(hlink);
703	}
704
705	/* check dma status and clean up CORB/RIRB buffers */
706	if (!bus->cmd_dma_state)
707		snd_hdac_bus_stop_cmd_io(bus);
708#endif
709
710	/* enable ppcap interrupt */
711	hda_dsp_ctrl_ppcap_enable(sdev, true);
712	hda_dsp_ctrl_ppcap_int_enable(sdev, true);
713
714cleanup:
715	/* display codec can powered off after controller init */
716	hda_codec_i915_display_power(sdev, false);
717
718	return 0;
719}
720
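/*
 * System resume. A resume from D0I3 only re-powers the active HDA links and
 * restores CORB/RIRB DMA, L1SEN and PCI wake state; a resume from D3 goes
 * through the full controller re-init in hda_resume().
 */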
721int hda_dsp_resume(struct snd_sof_dev *sdev)
722{
723	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
724	struct pci_dev *pci = to_pci_dev(sdev->dev);
725	const struct sof_dsp_power_state target_state = {
726		.state = SOF_DSP_PM_D0,
727		.substate = SOF_HDA_DSP_PM_D0I0,
728	};
729#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
730	struct hdac_bus *bus = sof_to_bus(sdev);
731	struct hdac_ext_link *hlink = NULL;
732#endif
733	int ret;
734
735	/* resume from D0I3 */
736	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
737#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
738		/* power up links that were active before suspend */
739		list_for_each_entry(hlink, &bus->hlink_list, list) {
740			if (hlink->ref_count) {
741				ret = snd_hdac_ext_bus_link_power_up(hlink);
742				if (ret < 0) {
743					dev_dbg(sdev->dev,
744						"error %d in %s: failed to power up links\n",
745						ret, __func__);
746					return ret;
747				}
748			}
749		}
750
751		/* set up CORB/RIRB buffers if the command DMA was on before suspend */
752		if (bus->cmd_dma_state)
753			snd_hdac_bus_init_cmd_io(bus);
754#endif
755
756		/* Set DSP power state */
757		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
758		if (ret < 0) {
759			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
760				target_state.state, target_state.substate);
761			return ret;
762		}
763
764		/* restore L1SEN bit */
765		if (hda->l1_support_changed)
766			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
767						HDA_VS_INTEL_EM2,
768						HDA_VS_INTEL_EM2_L1SEN, 0);
769
770		/* restore and disable the system wakeup */
771		pci_restore_state(pci);
772		disable_irq_wake(pci->irq);
773		return 0;
774	}
775
776	/* init hda controller. DSP cores will be powered up during fw boot */
777	ret = hda_resume(sdev, false);
778	if (ret < 0)
779		return ret;
780
781	return snd_sof_dsp_set_power_state(sdev, &target_state);
782}
783
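/* Runtime resume: always a full controller re-init via hda_resume() */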
784int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
785{
786	const struct sof_dsp_power_state target_state = {
787		.state = SOF_DSP_PM_D0,
788	};
789	int ret;
790
791	/* init hda controller. DSP cores will be powered up during fw boot */
792	ret = hda_resume(sdev, true);
793	if (ret < 0)
794		return ret;
795
796	return snd_sof_dsp_set_power_state(sdev, &target_state);
797}
798
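/*
 * Runtime idle callback: refuse runtime suspend while any codec on the
 * bus is still powered.
 */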
799int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
800{
801	struct hdac_bus *hbus = sof_to_bus(sdev);
802
803	if (hbus->codec_powered) {
804		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
805			(unsigned int)hbus->codec_powered);
806		return -EBUSY;
807	}
808
809	return 0;
810}
811
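/* Runtime suspend: cancel any pending D0I3 transition, then enter D3 */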
812int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
813{
814	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
815	const struct sof_dsp_power_state target_state = {
816		.state = SOF_DSP_PM_D3,
817	};
818	int ret;
819
820	/* cancel any attempt for DSP D0I3 */
821	cancel_delayed_work_sync(&hda->d0i3_work);
822
823	/* stop hda controller and power dsp off */
824	ret = hda_suspend(sdev, true);
825	if (ret < 0)
826		return ret;
827
828	return snd_sof_dsp_set_power_state(sdev, &target_state);
829}
830
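/*
 * System suspend. For an S0Ix target the DSP stays in D0I3: L1SEN is
 * enabled, the CORB/RIRB DMA and HDA links are stopped and the IPC IRQ is
 * armed as a wake source. For any other target the controller and DSP are
 * powered off via hda_suspend().
 */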
831int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
832{
833	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
834	struct hdac_bus *bus = sof_to_bus(sdev);
835	struct pci_dev *pci = to_pci_dev(sdev->dev);
836	const struct sof_dsp_power_state target_dsp_state = {
837		.state = target_state,
838		.substate = target_state == SOF_DSP_PM_D0 ?
839				SOF_HDA_DSP_PM_D0I3 : 0,
840	};
841	int ret;
842
843	/* cancel any attempt for DSP D0I3 */
844	cancel_delayed_work_sync(&hda->d0i3_work);
845
846	if (target_state == SOF_DSP_PM_D0) {
847		/* Set DSP power state */
848		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
849		if (ret < 0) {
850			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
851				target_dsp_state.state,
852				target_dsp_state.substate);
853			return ret;
854		}
855
856		/* enable L1SEN to make sure the system can enter S0Ix */
857		hda->l1_support_changed =
858			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
859						HDA_VS_INTEL_EM2,
860						HDA_VS_INTEL_EM2_L1SEN,
861						HDA_VS_INTEL_EM2_L1SEN);
862
863#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
864		/* stop the CORB/RIRB DMA if it is On */
865		if (bus->cmd_dma_state)
866			snd_hdac_bus_stop_cmd_io(bus);
867
868		/* no link can be powered in s0ix state */
869		ret = snd_hdac_ext_bus_link_power_down_all(bus);
870		if (ret < 0) {
871			dev_dbg(sdev->dev,
872				"error %d in %s: failed to power down links\n",
873				ret, __func__);
874			return ret;
875		}
876#endif
877
878		/* enable the system waking up via IPC IRQ */
879		enable_irq_wake(pci->irq);
880		pci_save_state(pci);
881		return 0;
882	}
883
884	/* stop hda controller and power dsp off */
885	ret = hda_suspend(sdev, false);
886	if (ret < 0) {
887		dev_err(bus->dev, "error: suspending dsp\n");
888		return ret;
889	}
890
891	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
892}
893
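/* Shutdown: handled as a system (S3) suspend */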
894int hda_dsp_shutdown(struct snd_sof_dev *sdev)
895{
896	sdev->system_suspend_target = SOF_SUSPEND_S3;
897	return snd_sof_suspend(sdev->dev);
898}
899
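/*
 * Clear the prepared state of all BE (HDA link) streams so they are
 * re-configured after resume; stream tags of playback link streams are
 * also cleared.
 */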
900int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
901{
902#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
903	struct hdac_bus *bus = sof_to_bus(sdev);
904	struct snd_soc_pcm_runtime *rtd;
905	struct hdac_ext_stream *stream;
906	struct hdac_ext_link *link;
907	struct hdac_stream *s;
908	const char *name;
909	int stream_tag;
910
911	/* clear the link_prepared flag for all BE (HDA link) streams */
912	list_for_each_entry(s, &bus->stream_list, list) {
913		stream = stream_to_hdac_ext_stream(s);
914
915		/*
916		 * clear stream. This should already be taken care for running
917		 * streams when the SUSPEND trigger is called. But paused
918		 * streams do not get suspended, so this needs to be done
919		 * explicitly during suspend.
920		 */
921		if (stream->link_substream) {
922			rtd = asoc_substream_to_rtd(stream->link_substream);
923			name = asoc_rtd_to_codec(rtd, 0)->component->name;
924			link = snd_hdac_ext_bus_get_link(bus, name);
925			if (!link)
926				return -EINVAL;
927
928			stream->link_prepared = 0;
929
930			if (hdac_stream(stream)->direction ==
931				SNDRV_PCM_STREAM_CAPTURE)
932				continue;
933
934			stream_tag = hdac_stream(stream)->stream_tag;
935			snd_hdac_ext_link_clear_stream_id(link, stream_tag);
936		}
937	}
938#endif
939	return 0;
940}
941
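/*
 * Delayed work that moves the DSP to D0I3 when only D0I3-compatible
 * streams are active; otherwise the DSP is left in D0I0.
 */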
942void hda_dsp_d0i3_work(struct work_struct *work)
943{
944	struct sof_intel_hda_dev *hdev = container_of(work,
945						      struct sof_intel_hda_dev,
946						      d0i3_work.work);
947	struct hdac_bus *bus = &hdev->hbus.core;
948	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
949	struct sof_dsp_power_state target_state = {
950		.state = SOF_DSP_PM_D0,
951		.substate = SOF_HDA_DSP_PM_D0I3,
952	};
953	int ret;
954
955	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
956	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
957		/* remain in D0I0 */
958		return;
959
960	/* This can fail but error cannot be propagated */
961	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
962	if (ret < 0)
963		dev_err_ratelimited(sdev->dev,
964				    "error: failed to set DSP state %d substate %d\n",
965				    target_state.state, target_state.substate);
966}