Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2//
   3// This file is provided under a dual BSD/GPLv2 license.  When using or
   4// redistributing this file, you may do so under either license.
   5//
   6// Copyright(c) 2018 Intel Corporation. All rights reserved.
   7//
   8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
   9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
  10//	    Rander Wang <rander.wang@intel.com>
  11//          Keyon Jie <yang.jie@linux.intel.com>
  12//
  13
  14/*
  15 * Hardware interface for generic Intel audio DSP HDA IP
  16 */
  17
  18#include <linux/module.h>
  19#include <sound/hdaudio_ext.h>
  20#include <sound/hda_register.h>
  21#include <sound/hda-mlink.h>
  22#include <trace/events/sof_intel.h>
 
  23#include "../sof-audio.h"
  24#include "../ops.h"
  25#include "hda.h"
 
  26#include "hda-ipc.h"
  27
 
 
 
 
 
 
 
 
/*
 * Debug-only module parameter: when set, trace DMA is kept running while
 * the DSP is in D0I3 and the system remains in S0 (see
 * hda_dsp_set_D0_state()). Read-only via sysfs (mode 0444); defaults to
 * false so trace DMA is disabled on D0I3 entry.
 */
static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif
  34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  35/*
  36 * DSP Core control.
  37 */
  38
  39static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
  40{
  41	u32 adspcs;
  42	u32 reset;
  43	int ret;
  44
  45	/* set reset bits for cores */
  46	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  47	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  48					 HDA_DSP_REG_ADSPCS,
  49					 reset, reset);
  50
  51	/* poll with timeout to check if operation successful */
  52	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  53					HDA_DSP_REG_ADSPCS, adspcs,
  54					((adspcs & reset) == reset),
  55					HDA_DSP_REG_POLL_INTERVAL_US,
  56					HDA_DSP_RESET_TIMEOUT_US);
  57	if (ret < 0) {
  58		dev_err(sdev->dev,
  59			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
  60			__func__);
  61		return ret;
  62	}
  63
  64	/* has core entered reset ? */
  65	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
  66				  HDA_DSP_REG_ADSPCS);
  67	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
  68		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
  69		dev_err(sdev->dev,
  70			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
  71			core_mask, adspcs);
  72		ret = -EIO;
  73	}
  74
  75	return ret;
  76}
  77
  78static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
  79{
  80	unsigned int crst;
  81	u32 adspcs;
  82	int ret;
  83
  84	/* clear reset bits for cores */
  85	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
  86					 HDA_DSP_REG_ADSPCS,
  87					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
  88					 0);
  89
  90	/* poll with timeout to check if operation successful */
  91	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
  92	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
  93					    HDA_DSP_REG_ADSPCS, adspcs,
  94					    !(adspcs & crst),
  95					    HDA_DSP_REG_POLL_INTERVAL_US,
  96					    HDA_DSP_RESET_TIMEOUT_US);
  97
  98	if (ret < 0) {
  99		dev_err(sdev->dev,
 100			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 101			__func__);
 102		return ret;
 103	}
 104
 105	/* has core left reset ? */
 106	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 107				  HDA_DSP_REG_ADSPCS);
 108	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
 109		dev_err(sdev->dev,
 110			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
 111			core_mask, adspcs);
 112		ret = -EIO;
 113	}
 114
 115	return ret;
 116}
 117
 118int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
 119{
 120	/* stall core */
 121	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 122					 HDA_DSP_REG_ADSPCS,
 123					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 124					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 125
 126	/* set reset state */
 127	return hda_dsp_core_reset_enter(sdev, core_mask);
 128}
 
 129
 130bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
 131{
 132	int val;
 133	bool is_enable;
 134
 135	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 136
 137#define MASK_IS_EQUAL(v, m, field) ({	\
 138	u32 _m = field(m);		\
 139	((v) & _m) == _m;		\
 140})
 141
 142	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
 143		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
 144		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
 145		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 146
 147#undef MASK_IS_EQUAL
 148
 149	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
 150		is_enable, core_mask);
 151
 152	return is_enable;
 153}
 
 154
 155int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
 156{
 157	int ret;
 158
 159	/* leave reset state */
 160	ret = hda_dsp_core_reset_leave(sdev, core_mask);
 161	if (ret < 0)
 162		return ret;
 163
 164	/* run core */
 165	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
 166	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 167					 HDA_DSP_REG_ADSPCS,
 168					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
 169					 0);
 170
 171	/* is core now running ? */
 172	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
 173		hda_dsp_core_stall_reset(sdev, core_mask);
 174		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
 175			core_mask);
 176		ret = -EIO;
 177	}
 178
 179	return ret;
 180}
 
 181
 182/*
 183 * Power Management.
 184 */
 185
 186int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
 187{
 188	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 189	const struct sof_intel_dsp_desc *chip = hda->desc;
 190	unsigned int cpa;
 191	u32 adspcs;
 192	int ret;
 193
 194	/* restrict core_mask to host managed cores mask */
 195	core_mask &= chip->host_managed_cores_mask;
 196	/* return if core_mask is not valid */
 197	if (!core_mask)
 198		return 0;
 199
 200	/* update bits */
 201	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
 202				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
 203				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
 204
 205	/* poll with timeout to check if operation successful */
 206	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
 207	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 208					    HDA_DSP_REG_ADSPCS, adspcs,
 209					    (adspcs & cpa) == cpa,
 210					    HDA_DSP_REG_POLL_INTERVAL_US,
 211					    HDA_DSP_RESET_TIMEOUT_US);
 212	if (ret < 0) {
 213		dev_err(sdev->dev,
 214			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 215			__func__);
 216		return ret;
 217	}
 218
 219	/* did core power up ? */
 220	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 221				  HDA_DSP_REG_ADSPCS);
 222	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
 223		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
 224		dev_err(sdev->dev,
 225			"error: power up core failed core_mask %xadspcs 0x%x\n",
 226			core_mask, adspcs);
 227		ret = -EIO;
 228	}
 229
 230	return ret;
 231}
 
 232
 233static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
 234{
 235	u32 adspcs;
 236	int ret;
 237
 238	/* update bits */
 239	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
 240					 HDA_DSP_REG_ADSPCS,
 241					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
 242
 243	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 244				HDA_DSP_REG_ADSPCS, adspcs,
 245				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
 246				HDA_DSP_REG_POLL_INTERVAL_US,
 247				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
 248	if (ret < 0)
 249		dev_err(sdev->dev,
 250			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 251			__func__);
 252
 253	return ret;
 254}
 255
 256int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
 257{
 258	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 259	const struct sof_intel_dsp_desc *chip = hda->desc;
 260	int ret;
 261
 262	/* restrict core_mask to host managed cores mask */
 263	core_mask &= chip->host_managed_cores_mask;
 264
 265	/* return if core_mask is not valid or cores are already enabled */
 266	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
 267		return 0;
 268
 269	/* power up */
 270	ret = hda_dsp_core_power_up(sdev, core_mask);
 271	if (ret < 0) {
 272		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
 273			core_mask);
 274		return ret;
 275	}
 276
 277	return hda_dsp_core_run(sdev, core_mask);
 278}
 
 279
 280int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
 281				  unsigned int core_mask)
 282{
 283	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 284	const struct sof_intel_dsp_desc *chip = hda->desc;
 285	int ret;
 286
 287	/* restrict core_mask to host managed cores mask */
 288	core_mask &= chip->host_managed_cores_mask;
 289
 290	/* return if core_mask is not valid */
 291	if (!core_mask)
 292		return 0;
 293
 294	/* place core in reset prior to power down */
 295	ret = hda_dsp_core_stall_reset(sdev, core_mask);
 296	if (ret < 0) {
 297		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
 298			core_mask);
 299		return ret;
 300	}
 301
 302	/* power down core */
 303	ret = hda_dsp_core_power_down(sdev, core_mask);
 304	if (ret < 0) {
 305		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
 306			core_mask, ret);
 307		return ret;
 308	}
 309
 310	/* make sure we are in OFF state */
 311	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
 312		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
 313			core_mask, ret);
 314		ret = -EIO;
 315	}
 316
 317	return ret;
 318}
 
 319
 320void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
 321{
 322	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 323	const struct sof_intel_dsp_desc *chip = hda->desc;
 324
 325	if (sdev->dspless_mode_selected)
 326		return;
 327
 328	/* enable IPC DONE and BUSY interrupts */
 329	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 330			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
 331			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
 332
 333	/* enable IPC interrupt */
 334	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 335				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
 336}
 
 337
 338void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
 339{
 340	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 341	const struct sof_intel_dsp_desc *chip = hda->desc;
 342
 343	if (sdev->dspless_mode_selected)
 344		return;
 345
 346	/* disable IPC interrupt */
 347	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
 348				HDA_DSP_ADSPIC_IPC, 0);
 349
 350	/* disable IPC BUSY and DONE interrupt */
 351	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
 352			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
 353}
 
 354
/*
 * Poll the vendor-specific D0I3C register until its Command-In-Progress
 * (CIP) bit clears, sleeping 10-15 us between reads.
 *
 * Returns 0 once CIP is clear, -ETIMEDOUT after
 * HDA_DSP_REG_POLL_RETRY_COUNT unsuccessful retries.
 */
static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
{
	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;

	/* the D0I3C register offset is chip specific */
	chip = get_chip_info(pdata);
	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
		SOF_HDA_VS_D0I3C_CIP) {
		/* post-decrement: give up once the retry budget is spent */
		if (!retry--)
			return -ETIMEDOUT;
		usleep_range(10, 15);
	}

	return 0;
}
 371
 372static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
 373{
 374	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
 375
 376	if (pm_ops && pm_ops->set_pm_gate)
 377		return pm_ops->set_pm_gate(sdev, flags);
 378
 379	return 0;
 380}
 381
/*
 * Program the D0I3C::I3 bit to @value and verify the hardware accepted
 * the change.
 *
 * @value: SOF_HDA_VS_D0I3C_I3 to enter D0I3, 0 to leave it.
 *
 * Returns 0 on success, -ETIMEDOUT if CIP never cleared before or after
 * the write, -EIO if the I3 bit does not read back as requested.
 */
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	/* the D0I3C register offset is chip specific */
	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
	 * A delay is recommended before checking if D0I3C::CIP is cleared
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}
 426
 427/*
 428 * d0i3 streaming is enabled if all the active streams can
 429 * work in d0i3 state and playback is enabled
 430 */
 431static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
 432{
 433	struct snd_pcm_substream *substream;
 434	struct snd_sof_pcm *spcm;
 435	bool playback_active = false;
 436	int dir;
 437
 438	list_for_each_entry(spcm, &sdev->pcm_list, list) {
 439		for_each_pcm_streams(dir) {
 440			substream = spcm->stream[dir].substream;
 441			if (!substream || !substream->runtime)
 442				continue;
 443
 444			if (!spcm->stream[dir].d0i3_compatible)
 445				return false;
 446
 447			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
 448				playback_active = true;
 449		}
 450	}
 451
 452	return playback_active;
 453}
 454
/*
 * Handle a transition into a D0 substate (D0I0 or D0I3).
 *
 * Builds the PM_GATE flags for the target substate, programs the D0I3C
 * register accordingly and notifies the firmware via IPC. If the IPC
 * fails, the D0I3C register change is reverted so hardware and firmware
 * do not disagree about the state.
 *
 * Returns 0 on success, -EINVAL for an illegal transition, or the
 * register-update/IPC error.
 */
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0; /* D0I3C::I3 bit to program; 0 selects D0I0 */

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA need to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purpose.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
 535
 536/* helper to log DSP state */
 537static void hda_dsp_state_log(struct snd_sof_dev *sdev)
 538{
 539	switch (sdev->dsp_power_state.state) {
 540	case SOF_DSP_PM_D0:
 541		switch (sdev->dsp_power_state.substate) {
 542		case SOF_HDA_DSP_PM_D0I0:
 543			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
 544			break;
 545		case SOF_HDA_DSP_PM_D0I3:
 546			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
 547			break;
 548		default:
 549			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
 550				sdev->dsp_power_state.substate);
 551			break;
 552		}
 553		break;
 554	case SOF_DSP_PM_D1:
 555		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
 556		break;
 557	case SOF_DSP_PM_D2:
 558		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
 559		break;
 560	case SOF_DSP_PM_D3:
 561		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
 562		break;
 563	default:
 564		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
 565			sdev->dsp_power_state.state);
 566		break;
 567	}
 568}
 569
/*
 * All DSP power state transitions are initiated by the driver.
 * If the requested state change fails, the error is simply returned.
 * Further state transitions are attempted only when the set_power_save() op
 * is called again either because of a new IPC sent to the DSP or
 * during system suspend/resume.
 *
 * On success sdev->dsp_power_state is updated to @target_state and the
 * new state is logged at debug level.
 */
static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
				   const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		/* D0 substate transitions (D0I0/D0I3) */
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	/* commit and log the new state */
	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}
 612
 613int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
 614				 const struct sof_dsp_power_state *target_state)
 615{
 616	/*
 617	 * When the DSP is already in D0I3 and the target state is D0I3,
 618	 * it could be the case that the DSP is in D0I3 during S0
 619	 * and the system is suspending to S0Ix. Therefore,
 620	 * hda_dsp_set_D0_state() must be called to disable trace DMA
 621	 * by sending the PM_GATE IPC to the FW.
 622	 */
 623	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
 624	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
 625		return hda_dsp_set_power_state(sdev, target_state);
 626
 627	/*
 628	 * For all other cases, return without doing anything if
 629	 * the DSP is already in the target state.
 630	 */
 631	if (target_state->state == sdev->dsp_power_state.state &&
 632	    target_state->substate == sdev->dsp_power_state.substate)
 633		return 0;
 634
 635	return hda_dsp_set_power_state(sdev, target_state);
 636}
 
 637
 638int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
 639				 const struct sof_dsp_power_state *target_state)
 640{
 641	/* Return without doing anything if the DSP is already in the target state */
 642	if (target_state->state == sdev->dsp_power_state.state &&
 643	    target_state->substate == sdev->dsp_power_state.substate)
 644		return 0;
 645
 646	return hda_dsp_set_power_state(sdev, target_state);
 647}
 
 648
 649/*
 650 * Audio DSP states may transform as below:-
 651 *
 652 *                                         Opportunistic D0I3 in S0
 653 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 654 *     suspend    |                     +--------------------+
 655 *   +------------+       D0I0(active)  |                    |
 656 *   |            |                     <---------------+    |
 657 *   |   +-------->                     |    New IPC	|    |
 658 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
 659 *   |   |resume     |  |         |  |			|    |
 660 *   |   |           |  |         |  |			|    |
 661 *   |   |     System|  |         |  |			|    |
 662 *   |   |     resume|  | S3/S0IX |  |                  |    |
 663 *   |   |	     |  | suspend |  | S0IX             |    |
 664 *   |   |           |  |         |  |suspend           |    |
 665 *   |   |           |  |         |  |                  |    |
 666 *   |   |           |  |         |  |                  |    |
 667 * +-v---+-----------+--v-------+ |  |           +------+----v----+
 668 * |                            | |  +----------->                |
 669 * |       D3 (suspended)       | |              |      D0I3      |
 670 * |                            | +--------------+                |
 671 * |                            |  System resume |                |
 672 * +----------------------------+		 +----------------+
 673 *
 674 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 675 *		 ignored the suspend trigger. Otherwise the DSP
 676 *		 is in D3.
 677 */
 678
/*
 * Common suspend path for system and runtime suspend.
 *
 * @runtime_suspend: true for runtime PM; arms codec jack wake so a jack
 *		     event can wake the controller.
 *
 * Disables interrupts, powers down the DSP cores (skipped in dspless
 * mode), stops the HDA controller, disables LP retention and puts the
 * link into reset.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3 state
	 * We must not try IMR boot on next power up (as it will fail).
	 *
	 * In case of firmware crash or boot failure set the skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	/* make sure that no irq handler is pending before shutdown */
	synchronize_irq(sdev->ipc_irq);

	/* jack wake is only armed for runtime suspend */
	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	/* no DSP-side teardown needed in dspless mode */
	if (sdev->dspless_mode_selected)
		goto skip_dsp;

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
skip_dsp:

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
 748
 749static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 750{
 751	int ret;
 752
 753	/* display codec must be powered before link reset */
 754	hda_codec_i915_display_power(sdev, true);
 755
 756	/*
 757	 * clear TCSEL to clear playback on some HD Audio
 758	 * codecs. PCI TCSEL is defined in the Intel manuals.
 759	 */
 760	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
 761
 762	/* reset and start hda controller */
 763	ret = hda_dsp_ctrl_init_chip(sdev);
 764	if (ret < 0) {
 765		dev_err(sdev->dev,
 766			"error: failed to start controller after resume\n");
 767		goto cleanup;
 768	}
 769
 770	/* check jack status */
 771	if (runtime_resume) {
 772		hda_codec_jack_wake_enable(sdev, false);
 773		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
 774			hda_codec_jack_check(sdev);
 775	}
 776
 777	if (!sdev->dspless_mode_selected) {
 778		/* enable ppcap interrupt */
 779		hda_dsp_ctrl_ppcap_enable(sdev, true);
 780		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
 781	}
 782
 783cleanup:
 784	/* display codec can powered off after controller init */
 785	hda_codec_i915_display_power(sdev, false);
 786
 787	return 0;
 788}
 789
/*
 * System resume handler.
 *
 * If the DSP stayed in D0 (it was in D0I3 during S0Ix), only the HDA
 * links, CORB/RIRB command I/O, DSP power state, L1SEN and PCI/wake
 * state need restoring. Otherwise the controller is fully
 * re-initialized and the DSP power state set to D0/D0I0.
 */
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if was on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
 
 841
 842int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
 843{
 844	const struct sof_dsp_power_state target_state = {
 845		.state = SOF_DSP_PM_D0,
 846	};
 847	int ret;
 848
 849	/* init hda controller. DSP cores will be powered up during fw boot */
 850	ret = hda_resume(sdev, true);
 851	if (ret < 0)
 852		return ret;
 853
 854	return snd_sof_dsp_set_power_state(sdev, &target_state);
 855}
 
 856
 857int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
 858{
 859	struct hdac_bus *hbus = sof_to_bus(sdev);
 860
 861	if (hbus->codec_powered) {
 862		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
 863			(unsigned int)hbus->codec_powered);
 864		return -EBUSY;
 865	}
 866
 867	return 0;
 868}
 
 869
 870int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
 871{
 872	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 873	const struct sof_dsp_power_state target_state = {
 874		.state = SOF_DSP_PM_D3,
 875	};
 876	int ret;
 877
 878	if (!sdev->dspless_mode_selected) {
 879		/* cancel any attempt for DSP D0I3 */
 880		cancel_delayed_work_sync(&hda->d0i3_work);
 881	}
 882
 883	/* stop hda controller and power dsp off */
 884	ret = hda_suspend(sdev, true);
 885	if (ret < 0)
 886		return ret;
 887
 888	return snd_sof_dsp_set_power_state(sdev, &target_state);
 889}
 
 890
/*
 * System suspend handler.
 *
 * @target_state: requested DSP power state. SOF_DSP_PM_D0 means the
 *		  system is entering S0Ix while the DSP remains in D0
 *		  (substate D0I3); any other value follows the full
 *		  suspend path to D3.
 */
int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		/* a D0 target implies the D0I3 substate on this path */
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is On */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
 
 950
 951static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
 952{
 953	struct hdac_bus *bus = sof_to_bus(sdev);
 954	struct hdac_stream *s;
 955	unsigned int active_streams = 0;
 956	int sd_offset;
 957	u32 val;
 958
 959	list_for_each_entry(s, &bus->stream_list, list) {
 960		sd_offset = SOF_STREAM_SD_OFFSET(s);
 961		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
 962				       sd_offset);
 963		if (val & SOF_HDA_SD_CTL_DMA_START)
 964			active_streams |= BIT(s->index);
 965	}
 966
 967	return active_streams;
 968}
 969
 970static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
 971{
 972	int ret;
 973
 974	/*
 975	 * Do not assume a certain timing between the prior
 976	 * suspend flow, and running of this quirk function.
 977	 * This is needed if the controller was just put
 978	 * to reset before calling this function.
 979	 */
 980	usleep_range(500, 1000);
 981
 982	/*
 983	 * Take controller out of reset to flush DMA
 984	 * transactions.
 985	 */
 986	ret = hda_dsp_ctrl_link_reset(sdev, false);
 987	if (ret < 0)
 988		return ret;
 989
 990	usleep_range(500, 1000);
 991
 992	/* Restore state for shutdown, back to reset */
 993	ret = hda_dsp_ctrl_link_reset(sdev, true);
 994	if (ret < 0)
 995		return ret;
 996
 997	return ret;
 998}
 999
1000int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
1001{
1002	unsigned int active_streams;
1003	int ret, ret2;
1004
1005	/* check if DMA cleanup has been successful */
1006	active_streams = hda_dsp_check_for_dma_streams(sdev);
1007
1008	sdev->system_suspend_target = SOF_SUSPEND_S3;
1009	ret = snd_sof_suspend(sdev->dev);
1010
1011	if (active_streams) {
1012		dev_warn(sdev->dev,
1013			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
1014			 active_streams);
1015		ret2 = hda_dsp_s5_quirk(sdev);
1016		if (ret2 < 0)
1017			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
1018	}
1019
1020	return ret;
1021}
 
1022
/*
 * Plain shutdown handler: quiesce the DSP and controller by running the
 * regular S3 suspend flow before power-off.
 */
int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}
 
1028
1029int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
1030{
1031	int ret;
1032
1033	/* make sure all DAI resources are freed */
1034	ret = hda_dsp_dais_suspend(sdev);
1035	if (ret < 0)
1036		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
1037
1038	return ret;
1039}
 
1040
1041void hda_dsp_d0i3_work(struct work_struct *work)
1042{
1043	struct sof_intel_hda_dev *hdev = container_of(work,
1044						      struct sof_intel_hda_dev,
1045						      d0i3_work.work);
1046	struct hdac_bus *bus = &hdev->hbus.core;
1047	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
1048	struct sof_dsp_power_state target_state = {
1049		.state = SOF_DSP_PM_D0,
1050		.substate = SOF_HDA_DSP_PM_D0I3,
1051	};
1052	int ret;
1053
1054	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
1055	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
1056		/* remain in D0I0 */
1057		return;
1058
1059	/* This can fail but error cannot be propagated */
1060	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
1061	if (ret < 0)
1062		dev_err_ratelimited(sdev->dev,
1063				    "error: failed to set DSP state %d substate %d\n",
1064				    target_state.state, target_state.substate);
1065}
 
1066
1067int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1068{
1069	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1070	int ret, ret1;
1071
1072	/* power up core */
1073	ret = hda_dsp_enable_core(sdev, BIT(core));
1074	if (ret < 0) {
1075		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1076			core, ret);
1077		return ret;
1078	}
1079
1080	/* No need to send IPC for primary core or if FW boot is not complete */
1081	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1082		return 0;
1083
1084	/* No need to continue the set_core_state ops is not available */
1085	if (!pm_ops->set_core_state)
1086		return 0;
1087
1088	/* Now notify DSP for secondary cores */
1089	ret = pm_ops->set_core_state(sdev, core, true);
1090	if (ret < 0) {
1091		dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
1092			core, ret);
1093		goto power_down;
1094	}
1095
1096	return ret;
1097
1098power_down:
1099	/* power down core if it is host managed and return the original error if this fails too */
1100	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1101	if (ret1 < 0)
1102		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1103
1104	return ret;
1105}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1106
/*
 * Quiesce DSP-related interrupt sources (SoundWire and IPC) ahead of
 * suspend/shutdown so no handler runs while the device is torn down.
 * Always returns 0.
 */
int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	/* mask SoundWire interrupt generation */
	hda_sdw_int_enable(sdev, false);
	/* mask IPC interrupts (DONE/BUSY sources and top-level bit) */
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
/* ==== second source snapshot below: kernel v6.13.7 ==== */
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2//
   3// This file is provided under a dual BSD/GPLv2 license.  When using or
   4// redistributing this file, you may do so under either license.
   5//
   6// Copyright(c) 2018 Intel Corporation
   7//
   8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
   9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
  10//	    Rander Wang <rander.wang@intel.com>
  11//          Keyon Jie <yang.jie@linux.intel.com>
  12//
  13
  14/*
  15 * Hardware interface for generic Intel audio DSP HDA IP
  16 */
  17
  18#include <linux/module.h>
  19#include <sound/hdaudio_ext.h>
  20#include <sound/hda_register.h>
  21#include <sound/hda-mlink.h>
  22#include <trace/events/sof_intel.h>
  23#include <sound/sof/xtensa.h>
  24#include "../sof-audio.h"
  25#include "../ops.h"
  26#include "hda.h"
  27#include "mtl.h"
  28#include "hda-ipc.h"
  29
/* presumably bounds the exception header dumped from the DSP — usage not in this view */
#define EXCEPT_MAX_HDR_SIZE	0x400
/* presumably the number of extended ROM status dwords — usage not in this view */
#define HDA_EXT_ROM_STATUS_SIZE 8

/* maps a numeric DSP status/error code to a human-readable string */
struct hda_dsp_msg_code {
	u32 code;	/* raw code reported by the DSP */
	const char *text;	/* description for logging */
};

/*
 * Debug-only knob: when set, trace DMA stays enabled while the DSP is in
 * D0I3 and the system remains in S0 (consumed in hda_dsp_set_D0_state()).
 * Read-only module parameter, built only with CONFIG_SND_SOC_SOF_DEBUG.
 */
static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif
  44
/*
 * Fill @interface_mask with the DAI types reachable per access mode for
 * the detected hardware generation:
 *   SOF_DAI_DSP_ACCESS  - interfaces owned/driven by the DSP
 *   SOF_DAI_HOST_ACCESS - interfaces the host can drive without the DSP
 * Unknown generations leave the masks untouched (zero).
 */
static void hda_get_interfaces(struct snd_sof_dev *sdev, u32 *interface_mask)
{
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(sdev->pdata);
	switch (chip->hw_ip_version) {
	case SOF_INTEL_TANGIER:
	case SOF_INTEL_BAYTRAIL:
	case SOF_INTEL_BROADWELL:
		/* pre-cAVS parts: SSP only, DSP access only */
		interface_mask[SOF_DAI_DSP_ACCESS] =  BIT(SOF_DAI_INTEL_SSP);
		break;
	case SOF_INTEL_CAVS_1_5:
	case SOF_INTEL_CAVS_1_5_PLUS:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | BIT(SOF_DAI_INTEL_HDA);
		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
		break;
	case SOF_INTEL_CAVS_1_8:
	case SOF_INTEL_CAVS_2_0:
	case SOF_INTEL_CAVS_2_5:
	case SOF_INTEL_ACE_1_0:
		/* ALH added; host can still only reach HDA directly */
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
		break;
	case SOF_INTEL_ACE_2_0:
	case SOF_INTEL_ACE_3_0:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
		/* all interfaces accessible without DSP */
		interface_mask[SOF_DAI_HOST_ACCESS] =
			interface_mask[SOF_DAI_DSP_ACCESS];
		break;
	default:
		break;
	}
}
  84
  85u32 hda_get_interface_mask(struct snd_sof_dev *sdev)
  86{
  87	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };
  88
  89	hda_get_interfaces(sdev, interface_mask);
  90
  91	return interface_mask[sdev->dspless_mode_selected];
  92}
  93EXPORT_SYMBOL_NS(hda_get_interface_mask, "SND_SOC_SOF_INTEL_HDA_COMMON");
  94
  95bool hda_is_chain_dma_supported(struct snd_sof_dev *sdev, u32 dai_type)
  96{
  97	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };
  98	const struct sof_intel_dsp_desc *chip;
  99
 100	if (sdev->dspless_mode_selected)
 101		return false;
 102
 103	hda_get_interfaces(sdev, interface_mask);
 104
 105	if (!(interface_mask[SOF_DAI_DSP_ACCESS] & BIT(dai_type)))
 106		return false;
 107
 108	if (dai_type == SOF_DAI_INTEL_HDA)
 109		return true;
 110
 111	switch (dai_type) {
 112	case SOF_DAI_INTEL_SSP:
 113	case SOF_DAI_INTEL_DMIC:
 114	case SOF_DAI_INTEL_ALH:
 115		chip = get_chip_info(sdev->pdata);
 116		if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
 117			return false;
 118		return true;
 119	default:
 120		return false;
 121	}
 122}
 123EXPORT_SYMBOL_NS(hda_is_chain_dma_supported, "SND_SOC_SOF_INTEL_HDA_COMMON");
 124
 125/*
 126 * DSP Core control.
 127 */
 128
/*
 * Put the cores in @core_mask into reset: set the ADSPCS CRST bits, poll
 * until the hardware reflects them, then re-read to confirm.
 * Returns 0 on success, the poll error on timeout, or -EIO if the cores
 * did not report the reset state.
 */
static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	u32 reset;
	int ret;

	/* set reset bits for cores */
	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 reset, reset);

	/* poll with timeout to check if operation successful */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					HDA_DSP_REG_ADSPCS, adspcs,
					((adspcs & reset) == reset),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core entered reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
 167
/*
 * Take the cores in @core_mask out of reset: clear the ADSPCS CRST bits,
 * poll until they read back as cleared, then re-read to confirm.
 * Returns 0 on success, the poll error on timeout, or -EIO if any core
 * is still reported in reset.
 */
static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
					 0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & crst),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
 207
/*
 * Stall the cores in @core_mask (stop instruction execution) and then
 * place them in reset. Returns the result of the reset-enter step.
 */
int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	/* stall core */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return hda_dsp_core_reset_enter(sdev, core_mask);
}
EXPORT_SYMBOL_NS(hda_dsp_core_stall_reset, "SND_SOC_SOF_INTEL_HDA_COMMON");
 220
 221bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
 222{
 223	int val;
 224	bool is_enable;
 225
 226	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 227
 228#define MASK_IS_EQUAL(v, m, field) ({	\
 229	u32 _m = field(m);		\
 230	((v) & _m) == _m;		\
 231})
 232
 233	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
 234		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
 235		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
 236		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
 237
 238#undef MASK_IS_EQUAL
 239
 240	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
 241		is_enable, core_mask);
 242
 243	return is_enable;
 244}
 245EXPORT_SYMBOL_NS(hda_dsp_core_is_enabled, "SND_SOC_SOF_INTEL_HDA_COMMON");
 246
/*
 * Start the cores in @core_mask: release reset, then clear the stall bits
 * so the cores execute. If the cores do not come up fully enabled they
 * are stalled and reset again, and -EIO is returned.
 */
int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int ret;

	/* leave reset state */
	ret = hda_dsp_core_reset_leave(sdev, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 0);

	/* is core now running ? */
	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
		/* roll back: stall and reset so the core is not left half-started */
		hda_dsp_core_stall_reset(sdev, core_mask);
		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_run, "SND_SOC_SOF_INTEL_HDA_COMMON");
 274
 275/*
 276 * Power Management.
 277 */
 278
 279int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
 280{
 281	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 282	const struct sof_intel_dsp_desc *chip = hda->desc;
 283	unsigned int cpa;
 284	u32 adspcs;
 285	int ret;
 286
 287	/* restrict core_mask to host managed cores mask */
 288	core_mask &= chip->host_managed_cores_mask;
 289	/* return if core_mask is not valid */
 290	if (!core_mask)
 291		return 0;
 292
 293	/* update bits */
 294	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
 295				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
 296				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
 297
 298	/* poll with timeout to check if operation successful */
 299	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
 300	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 301					    HDA_DSP_REG_ADSPCS, adspcs,
 302					    (adspcs & cpa) == cpa,
 303					    HDA_DSP_REG_POLL_INTERVAL_US,
 304					    HDA_DSP_RESET_TIMEOUT_US);
 305	if (ret < 0) {
 306		dev_err(sdev->dev,
 307			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
 308			__func__);
 309		return ret;
 310	}
 311
 312	/* did core power up ? */
 313	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
 314				  HDA_DSP_REG_ADSPCS);
 315	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
 316		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
 317		dev_err(sdev->dev,
 318			"error: power up core failed core_mask %xadspcs 0x%x\n",
 319			core_mask, adspcs);
 320		ret = -EIO;
 321	}
 322
 323	return ret;
 324}
 325EXPORT_SYMBOL_NS(hda_dsp_core_power_up, "SND_SOC_SOF_INTEL_HDA_COMMON");
 326
/*
 * Power down the cores in @core_mask: clear the ADSPCS SPA bits and poll
 * until the CPA bits read back as cleared (longer power-down timeout).
 * Returns 0 on success or the poll error on timeout.
 */
static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
				HDA_DSP_REG_ADSPCS, adspcs,
				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
				HDA_DSP_REG_POLL_INTERVAL_US,
				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);

	return ret;
}
 349
/*
 * Enable the host-managed cores in @core_mask: power them up and start
 * them running. Cores that are already enabled (or not host-managed) are
 * skipped; returns 0 in that case, otherwise the power-up/run status.
 */
int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid or cores are already enabled */
	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
		return 0;

	/* power up */
	ret = hda_dsp_core_power_up(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	return hda_dsp_core_run(sdev, core_mask);
}
EXPORT_SYMBOL_NS(hda_dsp_enable_core, "SND_SOC_SOF_INTEL_HDA_COMMON");
 374
 375int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
 376				  unsigned int core_mask)
 377{
 378	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
 379	const struct sof_intel_dsp_desc *chip = hda->desc;
 380	int ret;
 381
 382	/* restrict core_mask to host managed cores mask */
 383	core_mask &= chip->host_managed_cores_mask;
 384
 385	/* return if core_mask is not valid */
 386	if (!core_mask)
 387		return 0;
 388
 389	/* place core in reset prior to power down */
 390	ret = hda_dsp_core_stall_reset(sdev, core_mask);
 391	if (ret < 0) {
 392		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
 393			core_mask);
 394		return ret;
 395	}
 396
 397	/* power down core */
 398	ret = hda_dsp_core_power_down(sdev, core_mask);
 399	if (ret < 0) {
 400		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
 401			core_mask, ret);
 402		return ret;
 403	}
 404
 405	/* make sure we are in OFF state */
 406	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
 407		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
 408			core_mask, ret);
 409		ret = -EIO;
 410	}
 411
 412	return ret;
 413}
 414EXPORT_SYMBOL_NS(hda_dsp_core_reset_power_down, "SND_SOC_SOF_INTEL_HDA_COMMON");
 415
/*
 * Enable IPC interrupt delivery from the DSP: first the DONE and BUSY
 * sources in the chip-specific IPC control register, then the top-level
 * ADSPIC IPC bit. No-op in DSP-less mode.
 */
void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	if (sdev->dspless_mode_selected)
		return;

	/* enable IPC DONE and BUSY interrupts */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);

	/* enable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_int_enable, "SND_SOC_SOF_INTEL_HDA_COMMON");
 434
/*
 * Disable IPC interrupt delivery, mirroring hda_dsp_ipc_int_enable() in
 * reverse order: the top-level ADSPIC IPC bit first, then the BUSY/DONE
 * sources. No-op in DSP-less mode.
 */
void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	if (sdev->dspless_mode_selected)
		return;

	/* disable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, 0);

	/* disable IPC BUSY and DONE interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_int_disable, "SND_SOC_SOF_INTEL_HDA_COMMON");
 452
 453static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
 454{
 455	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
 456	struct snd_sof_pdata *pdata = sdev->pdata;
 457	const struct sof_intel_dsp_desc *chip;
 458
 459	chip = get_chip_info(pdata);
 460	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
 461		SOF_HDA_VS_D0I3C_CIP) {
 462		if (!retry--)
 463			return -ETIMEDOUT;
 464		usleep_range(10, 15);
 465	}
 466
 467	return 0;
 468}
 469
 470static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
 471{
 472	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
 473
 474	if (pm_ops && pm_ops->set_pm_gate)
 475		return pm_ops->set_pm_gate(sdev, flags);
 476
 477	return 0;
 478}
 479
/*
 * Write @value to the D0I3C::I3 bit and verify it took effect.
 *
 * The register may only be written while Command-In-Progress is clear,
 * so CIP is waited on both before and after the update (with a short
 * settle delay in between). Returns 0 on success, -ETIMEDOUT if CIP never
 * cleared, or -EIO if the I3 bit did not reach the requested value.
 */
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
	 * A delay is recommended before checking if D0I3C::CIP is cleared
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}
 524
 525/*
 526 * d0i3 streaming is enabled if all the active streams can
 527 * work in d0i3 state and playback is enabled
 528 */
 529static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
 530{
 531	struct snd_pcm_substream *substream;
 532	struct snd_sof_pcm *spcm;
 533	bool playback_active = false;
 534	int dir;
 535
 536	list_for_each_entry(spcm, &sdev->pcm_list, list) {
 537		for_each_pcm_streams(dir) {
 538			substream = spcm->stream[dir].substream;
 539			if (!substream || !substream->runtime)
 540				continue;
 541
 542			if (!spcm->stream[dir].d0i3_compatible)
 543				return false;
 544
 545			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
 546				playback_active = true;
 547		}
 548	}
 549
 550	return playback_active;
 551}
 552
/*
 * Handle a transition into a D0 substate (D0I0 or D0I3).
 *
 * Validates that the transition is legal, programs the D0I3C register,
 * then tells the firmware via a PM_GATE IPC which trace-DMA/power-gating
 * flags apply. If the IPC fails, the D0I3C write is reverted so the
 * hardware and firmware views cannot diverge. Returns 0 on success or a
 * negative error code.
 */
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA needs to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purposes (hda_enable_trace_D0I3_S0).
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
 633
/* Debug helper: log the DSP power state currently cached in sdev. */
static void hda_dsp_state_log(struct snd_sof_dev *sdev)
{
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* D0 has substates; report which one */
		switch (sdev->dsp_power_state.substate) {
		case SOF_HDA_DSP_PM_D0I0:
			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
			break;
		case SOF_HDA_DSP_PM_D0I3:
			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
			break;
		default:
			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
				sdev->dsp_power_state.substate);
			break;
		}
		break;
	case SOF_DSP_PM_D1:
		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
		break;
	case SOF_DSP_PM_D2:
		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
		break;
	case SOF_DSP_PM_D3:
		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
		break;
	default:
		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
			sdev->dsp_power_state.state);
		break;
	}
}
 667
/*
 * All DSP power state transitions are initiated by the driver.
 * If the requested state change fails, the error is simply returned.
 * Further state transitions are attempted only when the set_power_save() op
 * is called again either because of a new IPC sent to the DSP or
 * during system suspend/resume.
 *
 * The cached sdev->dsp_power_state is only updated after the transition
 * succeeds, so a failed attempt leaves the previous state recorded.
 */
static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
				   const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	/* commit the new state and log it */
	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}
 710
 711int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
 712				 const struct sof_dsp_power_state *target_state)
 713{
 714	/*
 715	 * When the DSP is already in D0I3 and the target state is D0I3,
 716	 * it could be the case that the DSP is in D0I3 during S0
 717	 * and the system is suspending to S0Ix. Therefore,
 718	 * hda_dsp_set_D0_state() must be called to disable trace DMA
 719	 * by sending the PM_GATE IPC to the FW.
 720	 */
 721	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
 722	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
 723		return hda_dsp_set_power_state(sdev, target_state);
 724
 725	/*
 726	 * For all other cases, return without doing anything if
 727	 * the DSP is already in the target state.
 728	 */
 729	if (target_state->state == sdev->dsp_power_state.state &&
 730	    target_state->substate == sdev->dsp_power_state.substate)
 731		return 0;
 732
 733	return hda_dsp_set_power_state(sdev, target_state);
 734}
 735EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc3, "SND_SOC_SOF_INTEL_HDA_COMMON");
 736
 737int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
 738				 const struct sof_dsp_power_state *target_state)
 739{
 740	/* Return without doing anything if the DSP is already in the target state */
 741	if (target_state->state == sdev->dsp_power_state.state &&
 742	    target_state->substate == sdev->dsp_power_state.substate)
 743		return 0;
 744
 745	return hda_dsp_set_power_state(sdev, target_state);
 746}
 747EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc4, "SND_SOC_SOF_INTEL_HDA_COMMON");
 748
 749/*
 750 * Audio DSP states may transform as below:-
 751 *
 752 *                                         Opportunistic D0I3 in S0
 753 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 754 *     suspend    |                     +--------------------+
 755 *   +------------+       D0I0(active)  |                    |
 756 *   |            |                     <---------------+    |
 757 *   |   +-------->                     |    New IPC	|    |
 758 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
 759 *   |   |resume     |  |         |  |			|    |
 760 *   |   |           |  |         |  |			|    |
 761 *   |   |     System|  |         |  |			|    |
 762 *   |   |     resume|  | S3/S0IX |  |                  |    |
 763 *   |   |	     |  | suspend |  | S0IX             |    |
 764 *   |   |           |  |         |  |suspend           |    |
 765 *   |   |           |  |         |  |                  |    |
 766 *   |   |           |  |         |  |                  |    |
 767 * +-v---+-----------+--v-------+ |  |           +------+----v----+
 768 * |                            | |  +----------->                |
 769 * |       D3 (suspended)       | |              |      D0I3      |
 770 * |                            | +--------------+                |
 771 * |                            |  System resume |                |
 772 * +----------------------------+		 +----------------+
 773 *
 774 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 775 *		 ignored the suspend trigger. Otherwise the DSP
 776 *		 is in D3.
 777 */
 778
/*
 * Common suspend path for system and runtime suspend.
 *
 * Decides whether IMR boot can be used on the next resume, masks all
 * interrupt sources, powers down the DSP (unless in DSP-less mode), stops
 * the HDA controller, and finally puts the link in reset and drops the
 * display-codec power reference. Returns 0 on success or a negative
 * error code.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	bool imr_lost = false;
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3
	 * state on CAVS platforms.
	 * On ACE platforms due to the system architecture the IMR content is
	 * lost at S3 state already, they are tailored for s2idle use.
	 * We must not try IMR boot on next power up in these cases as it will
	 * fail.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
	     sdev->system_suspend_target == SOF_SUSPEND_S3))
		imr_lost = true;

	/*
	 * In case of firmware crash or boot failure set the skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	/* make sure that no irq handler is pending before shutdown */
	synchronize_irq(sdev->ipc_irq);

	/* keep jack-detect wake sources armed only for runtime suspend */
	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	if (sdev->dspless_mode_selected)
		goto skip_dsp;

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
skip_dsp:

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
 858
/*
 * Common resume path for system and runtime resume: re-power the display
 * codec, re-initialize the HDA controller, re-arm jack detection (runtime
 * only), and re-enable PP capability interrupts unless in DSP-less mode.
 *
 * NOTE(review): a chip-init failure only skips the ppcap re-enable and is
 * then discarded — the function always returns 0. Looks intentional
 * (callers proceed to set the DSP power state regardless) but confirm.
 */
static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
{
	int ret;

	/* display codec must be powered before link reset */
	hda_codec_i915_display_power(sdev, true);

	/*
	 * clear TCSEL to clear playback on some HD Audio
	 * codecs. PCI TCSEL is defined in the Intel manuals.
	 */
	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);

	/* reset and start hda controller */
	ret = hda_dsp_ctrl_init_chip(sdev);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to start controller after resume\n");
		goto cleanup;
	}

	/* check jack status */
	if (runtime_resume) {
		hda_codec_jack_wake_enable(sdev, false);
		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
			hda_codec_jack_check(sdev);
	}

	if (!sdev->dspless_mode_selected) {
		/* enable ppcap interrupt */
		hda_dsp_ctrl_ppcap_enable(sdev, true);
		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
	}

cleanup:
	/* display codec can be powered off after controller init */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
 899
/*
 * System resume handler.
 *
 * Two paths: if the DSP stayed in D0 (D0I3 suspend) only the HDA links,
 * CORB/RIRB, L1SEN and PCI wake state need restoring; otherwise the full
 * controller re-init in hda_resume() runs. In both cases the DSP is moved
 * to D0I0. Returns 0 on success or a negative error code.
 */
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if was on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_resume, "SND_SOC_SOF_INTEL_HDA_COMMON");
 952
 953int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
 954{
 955	const struct sof_dsp_power_state target_state = {
 956		.state = SOF_DSP_PM_D0,
 957	};
 958	int ret;
 959
 960	/* init hda controller. DSP cores will be powered up during fw boot */
 961	ret = hda_resume(sdev, true);
 962	if (ret < 0)
 963		return ret;
 964
 965	return snd_sof_dsp_set_power_state(sdev, &target_state);
 966}
 967EXPORT_SYMBOL_NS(hda_dsp_runtime_resume, "SND_SOC_SOF_INTEL_HDA_COMMON");
 968
 969int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
 970{
 971	struct hdac_bus *hbus = sof_to_bus(sdev);
 972
 973	if (hbus->codec_powered) {
 974		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
 975			(unsigned int)hbus->codec_powered);
 976		return -EBUSY;
 977	}
 978
 979	return 0;
 980}
 981EXPORT_SYMBOL_NS(hda_dsp_runtime_idle, "SND_SOC_SOF_INTEL_HDA_COMMON");
 982
/*
 * Runtime-suspend handler: stop the HDA controller, power the DSP off and
 * report the DSP in D3.
 */
int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D3,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_suspend, "SND_SOC_SOF_INTEL_HDA_COMMON");
1004
/*
 * System suspend handler. For a D0 target (S0Ix) the DSP is placed in the
 * D0I3 substate: links are powered down, CORB/RIRB DMA stopped, L1SEN
 * enabled and the IPC IRQ armed as a wake source, but the controller stays
 * alive. For any other target the controller is stopped and the DSP is
 * fully powered off (D3).
 */
int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is On */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
EXPORT_SYMBOL_NS(hda_dsp_suspend, "SND_SOC_SOF_INTEL_HDA_COMMON");
1065
1066static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
1067{
1068	struct hdac_bus *bus = sof_to_bus(sdev);
1069	struct hdac_stream *s;
1070	unsigned int active_streams = 0;
1071	int sd_offset;
1072	u32 val;
1073
1074	list_for_each_entry(s, &bus->stream_list, list) {
1075		sd_offset = SOF_STREAM_SD_OFFSET(s);
1076		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
1077				       sd_offset);
1078		if (val & SOF_HDA_SD_CTL_DMA_START)
1079			active_streams |= BIT(s->index);
1080	}
1081
1082	return active_streams;
1083}
1084
1085static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
1086{
1087	int ret;
1088
1089	/*
1090	 * Do not assume a certain timing between the prior
1091	 * suspend flow, and running of this quirk function.
1092	 * This is needed if the controller was just put
1093	 * to reset before calling this function.
1094	 */
1095	usleep_range(500, 1000);
1096
1097	/*
1098	 * Take controller out of reset to flush DMA
1099	 * transactions.
1100	 */
1101	ret = hda_dsp_ctrl_link_reset(sdev, false);
1102	if (ret < 0)
1103		return ret;
1104
1105	usleep_range(500, 1000);
1106
1107	/* Restore state for shutdown, back to reset */
1108	ret = hda_dsp_ctrl_link_reset(sdev, true);
1109	if (ret < 0)
1110		return ret;
1111
1112	return ret;
1113}
1114
/*
 * Shutdown handler for platforms where stream DMA may still be running:
 * snapshot the active-DMA streams, run the normal S3 suspend flow, and if
 * any DMA was active apply the S5 link-reset quirk to flush it.
 */
int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
{
	unsigned int active_streams;
	int ret, ret2;

	/* check if DMA cleanup has been successful */
	active_streams = hda_dsp_check_for_dma_streams(sdev);

	sdev->system_suspend_target = SOF_SUSPEND_S3;
	ret = snd_sof_suspend(sdev->dev);

	if (active_streams) {
		dev_warn(sdev->dev,
			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
			 active_streams);
		ret2 = hda_dsp_s5_quirk(sdev);
		if (ret2 < 0)
			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
	}

	/* report the suspend result, not the recovery result */
	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown_dma_flush, "SND_SOC_SOF_INTEL_HDA_COMMON");
1138
/* Standard shutdown handler: treat shutdown as an S3 system suspend */
int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown, "SND_SOC_SOF_INTEL_HDA_COMMON");
1145
/*
 * Release all DAI resources so that hw_params can be programmed again
 * after resume. A failure is logged but still returned to the caller.
 */
int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	int ret;

	/* make sure all DAI resources are freed */
	ret = hda_dsp_dais_suspend(sdev);
	if (ret < 0)
		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_set_hw_params_upon_resume, "SND_SOC_SOF_INTEL_HDA_COMMON");
1158
/*
 * Delayed work that opportunistically moves the DSP from D0I0 to the
 * D0I3 low-power substate, but only when every active stream is
 * D0I3-compatible. Errors cannot be propagated from a work item, so a
 * failed transition is only logged (rate-limited).
 */
void hda_dsp_d0i3_work(struct work_struct *work)
{
	struct sof_intel_hda_dev *hdev = container_of(work,
						      struct sof_intel_hda_dev,
						      d0i3_work.work);
	struct hdac_bus *bus = &hdev->hbus.core;
	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
	struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I3,
	};
	int ret;

	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
		/* remain in D0I0 */
		return;

	/* This can fail but error cannot be propagated */
	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
	if (ret < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: failed to set DSP state %d substate %d\n",
				    target_state.state, target_state.substate);
}
EXPORT_SYMBOL_NS(hda_dsp_d0i3_work, "SND_SOC_SOF_INTEL_HDA_COMMON");
1185
1186int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1187{
1188	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1189	int ret, ret1;
1190
1191	/* power up core */
1192	ret = hda_dsp_enable_core(sdev, BIT(core));
1193	if (ret < 0) {
1194		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1195			core, ret);
1196		return ret;
1197	}
1198
1199	/* No need to send IPC for primary core or if FW boot is not complete */
1200	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1201		return 0;
1202
1203	/* No need to continue the set_core_state ops is not available */
1204	if (!pm_ops->set_core_state)
1205		return 0;
1206
1207	/* Now notify DSP for secondary cores */
1208	ret = pm_ops->set_core_state(sdev, core, true);
1209	if (ret < 0) {
1210		dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
1211			core, ret);
1212		goto power_down;
1213	}
1214
1215	return ret;
1216
1217power_down:
1218	/* power down core if it is host managed and return the original error if this fails too */
1219	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1220	if (ret1 < 0)
1221		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1222
1223	return ret;
1224}
1225EXPORT_SYMBOL_NS(hda_dsp_core_get, "SND_SOC_SOF_INTEL_HDA_COMMON");
1226
1227#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
1228void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
1229{
1230	struct sof_intel_hda_dev *hdev;
1231
1232	hdev = sdev->pdata->hw_pdata;
1233
1234	if (!hdev->sdw)
1235		return;
1236
1237	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2,
1238				HDA_DSP_REG_ADSPIC2_SNDW,
1239				enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0);
1240}
1241EXPORT_SYMBOL_NS(hda_common_enable_sdw_irq, "SND_SOC_SOF_INTEL_HDA_COMMON");
1242
/*
 * Enable/disable the SoundWire interrupt via the chip-specific callback.
 * A no-op when the ALH/SoundWire interface is not part of the interface
 * mask or the chip provides no enable_sdw_irq op.
 */
void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
{
	u32 interface_mask = hda_get_interface_mask(sdev);
	const struct sof_intel_dsp_desc *chip;

	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
		return;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->enable_sdw_irq)
		chip->enable_sdw_irq(sdev, enable);
}
EXPORT_SYMBOL_NS(hda_sdw_int_enable, "SND_SOC_SOF_INTEL_HDA_COMMON");
1256
1257int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
1258{
1259	struct sof_intel_hda_dev *hdev;
1260	struct sdw_intel_ctx *ctx;
1261	u32 caps;
1262
1263	hdev = sdev->pdata->hw_pdata;
1264	ctx = hdev->sdw;
1265
1266	caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP);
1267	caps &= SDW_SHIM_LCAP_LCOUNT_MASK;
1268
1269	/* Check HW supported vs property value */
1270	if (caps < ctx->count) {
1271		dev_err(sdev->dev,
1272			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
1273			__func__, ctx->count, caps);
1274		return -EINVAL;
1275	}
1276
1277	return 0;
1278}
1279EXPORT_SYMBOL_NS(hda_sdw_check_lcount_common, "SND_SOC_SOF_INTEL_HDA_COMMON");
1280
1281int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
1282{
1283	struct sof_intel_hda_dev *hdev;
1284	struct sdw_intel_ctx *ctx;
1285	struct hdac_bus *bus;
1286	u32 slcount;
1287
1288	bus = sof_to_bus(sdev);
1289
1290	hdev = sdev->pdata->hw_pdata;
1291	ctx = hdev->sdw;
1292
1293	slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
1294
1295	/* Check HW supported vs property value */
1296	if (slcount < ctx->count) {
1297		dev_err(sdev->dev,
1298			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
1299			__func__, ctx->count, slcount);
1300		return -EINVAL;
1301	}
1302
1303	return 0;
1304}
1305EXPORT_SYMBOL_NS(hda_sdw_check_lcount_ext, "SND_SOC_SOF_INTEL_HDA_COMMON");
1306
1307int hda_sdw_check_lcount(struct snd_sof_dev *sdev)
1308{
1309	const struct sof_intel_dsp_desc *chip;
1310
1311	chip = get_chip_info(sdev->pdata);
1312	if (chip && chip->read_sdw_lcount)
1313		return chip->read_sdw_lcount(sdev);
1314
1315	return 0;
1316}
1317EXPORT_SYMBOL_NS(hda_sdw_check_lcount, "SND_SOC_SOF_INTEL_HDA_COMMON");
1318
/*
 * Handle a SoundWire wake event via the chip-specific callback. A no-op
 * when the ALH/SoundWire interface is not part of the interface mask or
 * the chip provides no sdw_process_wakeen op.
 */
void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
{
	u32 interface_mask = hda_get_interface_mask(sdev);
	const struct sof_intel_dsp_desc *chip;

	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
		return;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->sdw_process_wakeen)
		chip->sdw_process_wakeen(sdev);
}
EXPORT_SYMBOL_NS(hda_sdw_process_wakeen, "SND_SOC_SOF_INTEL_HDA_COMMON");
1332
1333#endif
1334
/* Disable both the SoundWire and IPC interrupts; always returns 0 */
int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	hda_sdw_int_enable(sdev, false);
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_disable_interrupts, "SND_SOC_SOF_INTEL_HDA_COMMON");
1343
/* ROM/FW boot error codes and their human-readable descriptions */
static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
	{HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
	{HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
	{HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
	{HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
	{HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
	{HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
	{HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
	{HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
	{HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
	{HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
	{HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
	{HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
	{HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
	{HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
	{HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
	{HDA_DSP_ROM_NULL_FW_ENTRY,	"error: null FW entry point"},
};
1362
/* FSR (firmware status register) ROM state names for pre-ACE (cAVS) platforms */
#define FSR_ROM_STATE_ENTRY(state)	{FSR_STATE_ROM_##state, #state}
static const struct hda_dsp_msg_code cavs_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
	/* CSE states */
	FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
	FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
	FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
	FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
	FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
};

/* FSR ROM state names for ACE (hw_ip_version >= SOF_INTEL_ACE_1_0) platforms */
static const struct hda_dsp_msg_code ace_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(RESET_VECTOR_DONE),
	FSR_ROM_STATE_ENTRY(PURGE_BOOT),
	FSR_ROM_STATE_ENTRY(RESTORE_BOOT),
	FSR_ROM_STATE_ENTRY(FW_ENTRY_POINT),
	FSR_ROM_STATE_ENTRY(VALIDATE_PUB_KEY),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_HPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_ULPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_UP_ULPSRAM_STACK),
	FSR_ROM_STATE_ENTRY(POWER_UP_HPSRAM_DMA),
	FSR_ROM_STATE_ENTRY(BEFORE_EP_POINTER_READ),
	FSR_ROM_STATE_ENTRY(VALIDATE_MANIFEST),
	FSR_ROM_STATE_ENTRY(VALIDATE_FW_MODULE),
	FSR_ROM_STATE_ENTRY(PROTECT_IMR_REGION),
	FSR_ROM_STATE_ENTRY(PUSH_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(PULL_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(VALIDATE_PKG_DIR),
	FSR_ROM_STATE_ENTRY(VALIDATE_CPD),
	FSR_ROM_STATE_ENTRY(VALIDATE_CSS_MAN_HEADER),
	FSR_ROM_STATE_ENTRY(VALIDATE_BLOB_SVN),
	FSR_ROM_STATE_ENTRY(VERIFY_IFWI_PARTITION),
	FSR_ROM_STATE_ENTRY(REMOVE_ACCESS_CONTROL),
	FSR_ROM_STATE_ENTRY(AUTH_BYPASS),
	FSR_ROM_STATE_ENTRY(AUTH_ENABLED),
	FSR_ROM_STATE_ENTRY(INIT_DMA),
	FSR_ROM_STATE_ENTRY(PURGE_FW_ENTRY),
	FSR_ROM_STATE_ENTRY(PURGE_FW_END),
	FSR_ROM_STATE_ENTRY(CLEAN_UP_BSS_DONE),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_ENTRY),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_END),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_IN_DMA_BUFF),
	FSR_ROM_STATE_ENTRY(LOAD_CSE_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_CODE_TO_IMR),
	FSR_ROM_STATE_ENTRY(FW_LOADING_DONE),
	FSR_ROM_STATE_ENTRY(FW_CODE_LOADED),
	FSR_ROM_STATE_ENTRY(VERIFY_IMAGE_TYPE),
	FSR_ROM_STATE_ENTRY(AUTH_API_INIT),
	FSR_ROM_STATE_ENTRY(AUTH_API_PROC),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_BUSY),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_RESULT),
	FSR_ROM_STATE_ENTRY(AUTH_API_CLEANUP),
};

/* FSR state names reported by the BRNGUP (bring-up) module */
#define FSR_BRINGUP_STATE_ENTRY(state)	{FSR_STATE_BRINGUP_##state, #state}
static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
	FSR_BRINGUP_STATE_ENTRY(INIT),
	FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
	FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
	FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
	FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
	FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
};

/* Names for the FSR wait-state field (what the firmware is waiting for) */
#define FSR_WAIT_STATE_ENTRY(state)	{FSR_WAIT_FOR_##state, #state}
static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
	FSR_WAIT_STATE_ENTRY(IPC_BUSY),
	FSR_WAIT_STATE_ENTRY(IPC_DONE),
	FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
	FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
	FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
	FSR_WAIT_STATE_ENTRY(CSE_CSR),
};

/* Names for the FSR module field, indexed by FSR_MOD_* code */
#define FSR_MODULE_NAME_ENTRY(mod)	[FSR_MOD_##mod] = #mod
static const char * const fsr_module_names[] = {
	FSR_MODULE_NAME_ENTRY(ROM),
	FSR_MODULE_NAME_ENTRY(ROM_BYP),
	FSR_MODULE_NAME_ENTRY(BASE_FW),
	FSR_MODULE_NAME_ENTRY(LP_BOOT),
	FSR_MODULE_NAME_ENTRY(BRNGUP),
	FSR_MODULE_NAME_ENTRY(ROM_EXT),
};
1468
1469static const char *
1470hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
1471		       size_t array_size)
1472{
1473	int i;
1474
1475	for (i = 0; i < array_size; i++) {
1476		if (code == msg_code[i].code)
1477			return msg_code[i].text;
1478	}
1479
1480	return NULL;
1481}
1482
/*
 * Decode and print the firmware status register (FSR): reporting module,
 * state, optional wait-for reason and halted flag, followed by the error
 * code read from the dword right after the status register.
 */
void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
{
	const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
	const char *state_text, *error_text, *module_text;
	u32 fsr, state, wait_state, module, error_code;

	fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
	state = FSR_TO_STATE_CODE(fsr);
	wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
	module = FSR_TO_MODULE_CODE(fsr);

	if (module > FSR_MOD_ROM_EXT)
		module_text = "unknown";
	else
		module_text = fsr_module_names[module];

	/* the bring-up module uses its own state table; ROM states differ per IP generation */
	if (module == FSR_MOD_BRNGUP) {
		state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
						    ARRAY_SIZE(fsr_bringup_state_names));
	} else {
		if (chip->hw_ip_version < SOF_INTEL_ACE_1_0)
			state_text = hda_dsp_get_state_text(state,
							cavs_fsr_rom_state_names,
							ARRAY_SIZE(cavs_fsr_rom_state_names));
		else
			state_text = hda_dsp_get_state_text(state,
							ace_fsr_rom_state_names,
							ARRAY_SIZE(ace_fsr_rom_state_names));
	}

	/* not for us, must be generic sof message */
	if (!state_text) {
		dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
		return;
	}

	if (wait_state) {
		const char *wait_state_text;

		wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
							 ARRAY_SIZE(fsr_wait_state_names));
		if (!wait_state_text)
			wait_state_text = "unknown";

		dev_printk(level, sdev->dev,
			   "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
			   fsr, module_text, state_text, wait_state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	} else {
		dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
			   fsr, module_text, state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	}

	/* the error code lives in the dword following the status register */
	error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
	if (!error_code)
		return;

	error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
					    ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
	if (!error_text)
		error_text = "unknown";

	/* once the FW has entered, the register carries a status rather than an error */
	if (state == FSR_STATE_FW_ENTERED)
		dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
			   error_text);
	else
		dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
			   error_text);
}
EXPORT_SYMBOL_NS(hda_dsp_get_state, "SND_SOC_SOF_INTEL_HDA_COMMON");
1554
/*
 * Read the Xtensa oops registers, panic info and stack dump from the DSP
 * mailbox/memory window, laid out sequentially starting at dsp_oops_offset.
 * Bails out (registers only) if the oops header size looks bogus.
 */
static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
				  struct sof_ipc_dsp_oops_xtensa *xoops,
				  struct sof_ipc_panic_info *panic_info,
				  u32 *stack, size_t stack_words)
{
	u32 offset = sdev->dsp_oops_offset;

	/* first read registers */
	sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));

	/* note: variable AR register array is not read */

	/* then get panic info */
	if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
		dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
			xoops->arch_hdr.totalsize);
		return;
	}
	offset += xoops->arch_hdr.totalsize;
	sof_block_read(sdev, sdev->mmio_bar, offset,
		       panic_info, sizeof(*panic_info));

	/* then get the stack */
	offset += sizeof(*panic_info);
	sof_block_read(sdev, sdev->mmio_bar, offset, stack,
		       stack_words * sizeof(u32));
}
1582
1583/* dump the first 8 dwords representing the extended ROM status */
1584void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
1585				 u32 flags)
1586{
1587	const struct sof_intel_dsp_desc *chip;
1588	char msg[128];
1589	int len = 0;
1590	u32 value;
1591	int i;
1592
1593	chip = get_chip_info(sdev->pdata);
1594	for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
1595		value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
1596		len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
1597	}
1598
1599	dev_printk(level, sdev->dev, "extended rom status: %s", msg);
1600
1601}
1602
/*
 * Top-level DSP debug dump: print ROM/FW state, then either the full
 * register/oops/stack dump (IPC3 only) or the extended ROM status.
 * With SOF_DBG_DUMP_OPTIONAL the output is demoted to debug level.
 */
void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
	char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
	struct sof_ipc_dsp_oops_xtensa xoops;
	struct sof_ipc_panic_info panic_info;
	u32 stack[HDA_DSP_STACK_DUMP_SIZE];

	/* print ROM/FW status */
	hda_dsp_get_state(sdev, level);

	/* The firmware register dump only available with IPC3 */
	if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC_TYPE_3) {
		u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
		u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);

		hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
				      HDA_DSP_STACK_DUMP_SIZE);
		sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
					 &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
	} else {
		hda_dsp_dump_ext_rom_status(sdev, level, flags);
	}
}
EXPORT_SYMBOL_NS(hda_dsp_dump, "SND_SOC_SOF_INTEL_HDA_COMMON");