   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2// Copyright(c) 2015-17 Intel Corporation.
   3
   4/*
   5 * SoundWire Intel Master Driver
   6 */
   7
   8#include <linux/acpi.h>
   9#include <linux/debugfs.h>
  10#include <linux/delay.h>
  11#include <linux/io.h>
  12#include <sound/pcm_params.h>
  13#include <linux/pm_runtime.h>
  14#include <sound/soc.h>
  15#include <linux/soundwire/sdw_registers.h>
  16#include <linux/soundwire/sdw.h>
  17#include <linux/soundwire/sdw_intel.h>
  18#include "cadence_master.h"
  19#include "bus.h"
  20#include "intel.h"
  21
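/*
 * Note on the polling helpers below: intel_wait_bit() reads the register up
 * to 10 times with a 50-100us sleep per iteration, i.e. the hardware gets
 * roughly 0.5-1 ms to reach the target value before -EAGAIN is returned.
 * intel_set_bit() and intel_clear_bit() write the new value first and then
 * reuse that loop to wait for the masked bits to read back as all ones or
 * all zeroes, respectively.
 */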
  22static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
  23{
  24	int timeout = 10;
  25	u32 reg_read;
  26
  27	do {
  28		reg_read = readl(base + offset);
  29		if ((reg_read & mask) == target)
  30			return 0;
  31
  32		timeout--;
  33		usleep_range(50, 100);
  34	} while (timeout != 0);
  35
  36	return -EAGAIN;
  37}
  38
  39static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
  40{
  41	writel(value, base + offset);
  42	return intel_wait_bit(base, offset, mask, 0);
  43}
  44
  45static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
  46{
  47	writel(value, base + offset);
  48	return intel_wait_bit(base, offset, mask, mask);
  49}
  50
  51/*
  52 * debugfs
  53 */
  54#ifdef CONFIG_DEBUG_FS
  55
  56#define RD_BUF (2 * PAGE_SIZE)
  57
  58static ssize_t intel_sprintf(void __iomem *mem, bool l,
  59			     char *buf, size_t pos, unsigned int reg)
  60{
  61	int value;
  62
  63	if (l)
  64		value = intel_readl(mem, reg);
  65	else
  66		value = intel_readw(mem, reg);
  67
  68	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
  69}
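/*
 * The dump produced below lists one register per line as "<offset>\t<value>"
 * in hex, grouped per link. A purely illustrative line would be
 * "  10	5533" for SDW_SHIM_CTLSCAP(0); actual values are platform and
 * hardware state dependent. RD_BUF (two pages) bounds the output and
 * scnprintf() guarantees the buffer is never overrun.
 */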
  70
  71static int intel_reg_show(struct seq_file *s_file, void *data)
  72{
  73	struct sdw_intel *sdw = s_file->private;
  74	void __iomem *s = sdw->link_res->shim;
  75	void __iomem *a = sdw->link_res->alh;
  76	char *buf;
  77	ssize_t ret;
  78	int i, j;
  79	unsigned int links, reg;
  80
  81	buf = kzalloc(RD_BUF, GFP_KERNEL);
  82	if (!buf)
  83		return -ENOMEM;
  84
  85	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
  86
  87	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
  88	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
  89
  90	for (i = 0; i < links; i++) {
  91		reg = SDW_SHIM_LCAP + i * 4;
  92		ret += intel_sprintf(s, true, buf, ret, reg);
  93	}
  94
  95	for (i = 0; i < links; i++) {
  96		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
  97		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
  98		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
  99		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
 100		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
 101		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
 102		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
 103
 104		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
 105
  106		/*
  107		 * the value 10 is the number of PDIs per link; a cleanup
  108		 * is still needed to remove the hard-coded Intel
  109		 * configurations from cadence_master.c
  110		 */
 111		for (j = 0; j < 10; j++) {
 112			ret += intel_sprintf(s, false, buf, ret,
 113					SDW_SHIM_PCMSYCHM(i, j));
 114			ret += intel_sprintf(s, false, buf, ret,
 115					SDW_SHIM_PCMSYCHC(i, j));
 116		}
 117		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
 118
 119		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
 120		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
 121	}
 122
 123	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
 124	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
 125	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
 126
 127	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
 128	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
 129		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
 130
 131	seq_printf(s_file, "%s", buf);
 132	kfree(buf);
 133
 134	return 0;
 135}
 136DEFINE_SHOW_ATTRIBUTE(intel_reg);
 137
 138static int intel_set_m_datamode(void *data, u64 value)
 139{
 140	struct sdw_intel *sdw = data;
 141	struct sdw_bus *bus = &sdw->cdns.bus;
 142
 143	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 144		return -EINVAL;
 145
 146	/* Userspace changed the hardware state behind the kernel's back */
 147	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 148
 149	bus->params.m_data_mode = value;
 150
 151	return 0;
 152}
 153DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
 154			 intel_set_m_datamode, "%llu\n");
 155
 156static int intel_set_s_datamode(void *data, u64 value)
 157{
 158	struct sdw_intel *sdw = data;
 159	struct sdw_bus *bus = &sdw->cdns.bus;
 160
 161	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 162		return -EINVAL;
 163
 164	/* Userspace changed the hardware state behind the kernel's back */
 165	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 166
 167	bus->params.s_data_mode = value;
 168
 169	return 0;
 170}
 171DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
 172			 intel_set_s_datamode, "%llu\n");
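/*
 * Usage sketch for the two attributes above (the path is indicative only,
 * the parent directory depends on how the bus debugfs node is named):
 *	echo 1 > /sys/kernel/debug/soundwire/<master>/intel-sdw/intel-m-datamode
 * The value is stored in the bus parameters and picked up the next time the
 * port parameters are programmed; the TAINT_USER flag records that userspace
 * changed the hardware behavior behind the kernel's back.
 */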
 173
 174static void intel_debugfs_init(struct sdw_intel *sdw)
 175{
 176	struct dentry *root = sdw->cdns.bus.debugfs;
 177
 178	if (!root)
 179		return;
 180
 181	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
 182
 183	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
 184			    &intel_reg_fops);
 185
 186	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
 187			    &intel_set_m_datamode_fops);
 188
 189	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
 190			    &intel_set_s_datamode_fops);
 191
 192	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
 193}
 194
 195static void intel_debugfs_exit(struct sdw_intel *sdw)
 196{
 197	debugfs_remove_recursive(sdw->debugfs);
 198}
 199#else
 200static void intel_debugfs_init(struct sdw_intel *sdw) {}
 201static void intel_debugfs_exit(struct sdw_intel *sdw) {}
 202#endif /* CONFIG_DEBUG_FS */
 203
 204/*
 205 * shim ops
 206 */
 207/* this needs to be called with shim_lock */
 208static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
 209{
 210	void __iomem *shim = sdw->link_res->shim;
 211	unsigned int link_id = sdw->instance;
 212	u16 ioctl;
 213
 214	/* Switch to MIP from Glue logic */
 215	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
 216
 217	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
 218	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 219	usleep_range(10, 15);
 220
 221	ioctl &= ~(SDW_SHIM_IOCTL_DO);
 222	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 223	usleep_range(10, 15);
 224
 225	ioctl |= (SDW_SHIM_IOCTL_MIF);
 226	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 227	usleep_range(10, 15);
 228
 229	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
 230	ioctl &= ~(SDW_SHIM_IOCTL_COE);
 231	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 232	usleep_range(10, 15);
 233
 234	/* at this point Master IP has full control of the I/Os */
 235}
 236
 237/* this needs to be called with shim_lock */
 238static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
 239{
 240	unsigned int link_id = sdw->instance;
 241	void __iomem *shim = sdw->link_res->shim;
 242	u16 ioctl;
 243
 244	/* Glue logic */
 245	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
 246	ioctl |= SDW_SHIM_IOCTL_BKE;
 247	ioctl |= SDW_SHIM_IOCTL_COE;
 248	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 249	usleep_range(10, 15);
 250
 251	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
 252	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 253	usleep_range(10, 15);
 254
 255	/* at this point Integration Glue has full control of the I/Os */
 256}
 257
 258/* this needs to be called with shim_lock */
 259static void intel_shim_init(struct sdw_intel *sdw)
 260{
 261	void __iomem *shim = sdw->link_res->shim;
 262	unsigned int link_id = sdw->instance;
 263	u16 ioctl = 0, act;
 264
 265	/* Initialize Shim */
 266	ioctl |= SDW_SHIM_IOCTL_BKE;
 267	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 268	usleep_range(10, 15);
 269
 270	ioctl |= SDW_SHIM_IOCTL_WPDD;
 271	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 272	usleep_range(10, 15);
 273
 274	ioctl |= SDW_SHIM_IOCTL_DO;
 275	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 276	usleep_range(10, 15);
 277
 278	ioctl |= SDW_SHIM_IOCTL_DOE;
 279	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 280	usleep_range(10, 15);
 281
 282	intel_shim_glue_to_master_ip(sdw);
 283
 284	act = intel_readw(shim, SDW_SHIM_CTMCTL(link_id));
 285	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
 286	act |= SDW_SHIM_CTMCTL_DACTQE;
 287	act |= SDW_SHIM_CTMCTL_DODS;
 288	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
 289	usleep_range(10, 15);
 290}
 291
 292static int intel_shim_check_wake(struct sdw_intel *sdw)
 293{
 294	void __iomem *shim;
 295	u16 wake_sts;
 296
 297	shim = sdw->link_res->shim;
 298	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 299
 300	return wake_sts & BIT(sdw->instance);
 301}
 302
 303static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
 304{
 305	void __iomem *shim = sdw->link_res->shim;
 306	unsigned int link_id = sdw->instance;
 307	u16 wake_en, wake_sts;
 308
 309	mutex_lock(sdw->link_res->shim_lock);
 310	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
 311
 312	if (wake_enable) {
 313		/* Enable the wakeup */
 314		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 315		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 316	} else {
 317		/* Disable the wake up interrupt */
 318		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
 319		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 320
 321		/* Clear wake status */
 322		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 323		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
 324		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
 325	}
 326	mutex_unlock(sdw->link_res->shim_lock);
 327}
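/*
 * Both SDW_SHIM_WAKEEN and SDW_SHIM_WAKESTS use one bit per link, which is
 * why the enable/status bits above are shifted by the link instance: link 2,
 * for example, is controlled by BIT(2) in each register, and
 * intel_shim_check_wake() returns that raw bit rather than a normalized 0/1.
 */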
 328
 329static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
 330{
 331	void __iomem *shim = sdw->link_res->shim;
 332	int sync_reg;
 333
 334	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 335	return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
 336}
 337
 338static int intel_link_power_up(struct sdw_intel *sdw)
 339{
 340	unsigned int link_id = sdw->instance;
 341	void __iomem *shim = sdw->link_res->shim;
 342	u32 *shim_mask = sdw->link_res->shim_mask;
 343	struct sdw_bus *bus = &sdw->cdns.bus;
 344	struct sdw_master_prop *prop = &bus->prop;
 345	u32 spa_mask, cpa_mask;
 346	u32 link_control;
 347	int ret = 0;
 348	u32 syncprd;
 349	u32 sync_reg;
 350
 351	mutex_lock(sdw->link_res->shim_lock);
 352
  353	/*
  354	 * The hardware relies on an internal counter, typically 4 kHz,
  355	 * to generate the SoundWire SSP - which defines a 'safe'
  356	 * synchronization point between commands and audio transport
  357	 * and allows for multi-link synchronization. The SYNCPRD value
  358	 * only depends on the oscillator clock provided to
  359	 * the IP, so adjust it based on the _DSD properties reported in
  360	 * the DSDT tables. The values reported are based on either 24 MHz
  361	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
  362	 */
 363	if (prop->mclk_freq % 6000000)
 364		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
 365	else
 366		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
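	/*
	 * For example, assuming the 4 kHz sync period mentioned above
	 * (SDW_CADENCE_GSYNC_KHZ == 4), SYNCPRD works out to
	 * 38400 / 4 - 1 = 9599 for a 38.4 MHz oscillator and
	 * 24000 / 4 - 1 = 5999 for a 24 MHz oscillator.
	 */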
 367
 368	if (!*shim_mask) {
 369		dev_dbg(sdw->cdns.dev, "powering up all links\n");
 370
 371		/* we first need to program the SyncPRD/CPU registers */
 372		dev_dbg(sdw->cdns.dev,
 373			"first link up, programming SYNCPRD\n");
 374
 375		/* set SyncPRD period */
 376		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 377		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
 378
 379		/* Set SyncCPU bit */
 380		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
 381		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 382
 383		/* Link power up sequence */
 384		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 385
 386		/* only power-up enabled links */
 387		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
 388		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
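		/*
		 * For example, with link_mask = 0x3 (links 0 and 1 enabled),
		 * spa_mask requests power for those two links via their SPA
		 * bits, while cpa_mask selects the matching CPA status bits
		 * that intel_set_bit() polls until the hardware reports the
		 * links as powered.
		 */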
 389
 390		link_control |=  spa_mask;
 391
 392		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 393		if (ret < 0) {
 394			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
 395			goto out;
 396		}
 397
 398		/* SyncCPU will change once link is active */
 399		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
 400				     SDW_SHIM_SYNC_SYNCCPU, 0);
 401		if (ret < 0) {
 402			dev_err(sdw->cdns.dev,
 403				"Failed to set SHIM_SYNC: %d\n", ret);
 404			goto out;
 405		}
 406	}
 407
 408	*shim_mask |= BIT(link_id);
 409
 410	sdw->cdns.link_up = true;
 411
 412	intel_shim_init(sdw);
 413
 414out:
 415	mutex_unlock(sdw->link_res->shim_lock);
 416
 417	return ret;
 418}
 419
 420static int intel_link_power_down(struct sdw_intel *sdw)
 421{
 422	u32 link_control, spa_mask, cpa_mask;
 423	unsigned int link_id = sdw->instance;
 424	void __iomem *shim = sdw->link_res->shim;
 425	u32 *shim_mask = sdw->link_res->shim_mask;
 426	int ret = 0;
 427
 428	mutex_lock(sdw->link_res->shim_lock);
 429
 430	if (!(*shim_mask & BIT(link_id)))
 431		dev_err(sdw->cdns.dev,
 432			"%s: Unbalanced power-up/down calls\n", __func__);
 433
 434	sdw->cdns.link_up = false;
 435
 436	intel_shim_master_ip_to_glue(sdw);
 437
 438	*shim_mask &= ~BIT(link_id);
 439
 440	if (!*shim_mask) {
 441
 442		dev_dbg(sdw->cdns.dev, "powering down all links\n");
 443
 444		/* Link power down sequence */
 445		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 446
 447		/* only power-down enabled links */
 448		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
 449		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 450
 451		link_control &=  spa_mask;
 452
 453		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 454		if (ret < 0) {
 455			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
 456
 457			/*
 458			 * we leave the sdw->cdns.link_up flag as false since we've disabled
 459			 * the link at this point and cannot handle interrupts any longer.
 460			 */
 461		}
 462	}
 463
 464	mutex_unlock(sdw->link_res->shim_lock);
 465
 466	return ret;
 467}
 468
 469static void intel_shim_sync_arm(struct sdw_intel *sdw)
 470{
 471	void __iomem *shim = sdw->link_res->shim;
 472	u32 sync_reg;
 473
 474	mutex_lock(sdw->link_res->shim_lock);
 475
 476	/* update SYNC register */
 477	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 478	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
 479	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 480
 481	mutex_unlock(sdw->link_res->shim_lock);
 482}
 483
 484static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
 485{
 486	void __iomem *shim = sdw->link_res->shim;
 487	u32 sync_reg;
 488
 489	/* Read SYNC register */
 490	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 491
  492	/*
  493	 * Set the SyncGO bit to synchronously trigger a bank switch for
  494	 * all the Masters. A write to the SYNCGO bit also clears the
  495	 * CMDSYNC bit for all the Masters.
  496	 */
 497	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
 498
 499	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 500
 501	return 0;
 502}
 503
 504static int intel_shim_sync_go(struct sdw_intel *sdw)
 505{
 506	int ret;
 507
 508	mutex_lock(sdw->link_res->shim_lock);
 509
 510	ret = intel_shim_sync_go_unlocked(sdw);
 511
 512	mutex_unlock(sdw->link_res->shim_lock);
 513
 514	return ret;
 515}
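/*
 * Typical multi-link bank switch sequence: intel_shim_sync_arm() is called
 * once per link to set its CMDSYNC bit, then a single intel_shim_sync_go()
 * sets SYNCGO so that all armed links switch banks together, with the
 * hardware clearing the CMDSYNC bits as described above.
 */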
 516
 517/*
 518 * PDI routines
 519 */
 520static void intel_pdi_init(struct sdw_intel *sdw,
 521			   struct sdw_cdns_stream_config *config)
 522{
 523	void __iomem *shim = sdw->link_res->shim;
 524	unsigned int link_id = sdw->instance;
 525	int pcm_cap;
 526
 527	/* PCM Stream Capability */
 528	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
 529
 530	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
 531	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
 532	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
 533
 534	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
 535		config->pcm_bd, config->pcm_in, config->pcm_out);
 536}
 537
 538static int
 539intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
 540{
 541	void __iomem *shim = sdw->link_res->shim;
 542	unsigned int link_id = sdw->instance;
 543	int count;
 544
 545	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
 546
  547	/*
  548	 * WORKAROUND: on all existing Intel controllers, PDI
  549	 * number 2 reports a channel count of 1 even though it
  550	 * supports 8 channels, so hard-code the value for PDI
  551	 * number 2.
  552	 */
 553	if (pdi_num == 2)
 554		count = 7;
 555
 556	/* zero based values for channel count in register */
 557	count++;
 558
 559	return count;
 560}
 561
 562static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
 563				   struct sdw_cdns_pdi *pdi,
 564				   unsigned int num_pdi,
 565				   unsigned int *num_ch)
 566{
 567	int i, ch_count = 0;
 568
 569	for (i = 0; i < num_pdi; i++) {
 570		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
 571		ch_count += pdi->ch_count;
 572		pdi++;
 573	}
 574
 575	*num_ch = ch_count;
 576	return 0;
 577}
 578
 579static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
 580				      struct sdw_cdns_streams *stream)
 581{
 582	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
 583				&stream->num_ch_bd);
 584
 585	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
 586				&stream->num_ch_in);
 587
 588	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
 589				&stream->num_ch_out);
 590
 591	return 0;
 592}
 593
 594static void
 595intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 596{
 597	void __iomem *shim = sdw->link_res->shim;
 598	unsigned int link_id = sdw->instance;
 599	int pdi_conf = 0;
 600
 601	/* the Bulk and PCM streams are not contiguous */
 602	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 603	if (pdi->num >= 2)
 604		pdi->intel_alh_id += 2;
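	/*
	 * For example, PDI 0 on link 1 maps to ALH stream 1 * 16 + 0 + 3 = 19,
	 * while PDI 2 on link 0 maps to 0 * 16 + 2 + 3 + 2 = 7 because of the
	 * extra offset applied from PDI 2 onwards.
	 */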
 605
 606	/*
 607	 * Program stream parameters to stream SHIM register
 608	 * This is applicable for PCM stream only.
 609	 */
 610	if (pdi->type != SDW_STREAM_PCM)
 611		return;
 612
 613	if (pdi->dir == SDW_DATA_DIR_RX)
 614		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
 615	else
 616		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
 617
 618	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
 619	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
 620	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
 621
 622	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
 623}
 624
 625static void
 626intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 627{
 628	void __iomem *alh = sdw->link_res->alh;
 629	unsigned int link_id = sdw->instance;
 630	unsigned int conf;
 631
 632	/* the Bulk and PCM streams are not contiguous */
 633	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 634	if (pdi->num >= 2)
 635		pdi->intel_alh_id += 2;
 636
 637	/* Program Stream config ALH register */
 638	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
 639
 640	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
 641	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
 642
 643	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
 644}
 645
 646static int intel_params_stream(struct sdw_intel *sdw,
 647			       struct snd_pcm_substream *substream,
 648			       struct snd_soc_dai *dai,
 649			       struct snd_pcm_hw_params *hw_params,
 650			       int link_id, int alh_stream_id)
 651{
 652	struct sdw_intel_link_res *res = sdw->link_res;
 653	struct sdw_intel_stream_params_data params_data;
 654
 655	params_data.substream = substream;
 656	params_data.dai = dai;
 657	params_data.hw_params = hw_params;
 658	params_data.link_id = link_id;
 659	params_data.alh_stream_id = alh_stream_id;
 660
 661	if (res->ops && res->ops->params_stream && res->dev)
 662		return res->ops->params_stream(res->dev,
 663					       &params_data);
 664	return -EIO;
 665}
 666
 667/*
 668 * DAI routines
 669 */
 670
 671static int intel_hw_params(struct snd_pcm_substream *substream,
 672			   struct snd_pcm_hw_params *params,
 673			   struct snd_soc_dai *dai)
 674{
 675	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 676	struct sdw_intel *sdw = cdns_to_intel(cdns);
 677	struct sdw_cdns_dai_runtime *dai_runtime;
 678	struct sdw_cdns_pdi *pdi;
 679	struct sdw_stream_config sconfig;
 680	struct sdw_port_config *pconfig;
 681	int ch, dir;
 682	int ret;
 683
 684	dai_runtime = cdns->dai_runtime_array[dai->id];
 685	if (!dai_runtime)
 686		return -EIO;
 687
 688	ch = params_channels(params);
 689	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 690		dir = SDW_DATA_DIR_RX;
 691	else
 692		dir = SDW_DATA_DIR_TX;
 693
 694	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
 695
 696	if (!pdi) {
 697		ret = -EINVAL;
 698		goto error;
 699	}
 700
 701	/* do run-time configurations for SHIM, ALH and PDI/PORT */
 702	intel_pdi_shim_configure(sdw, pdi);
 703	intel_pdi_alh_configure(sdw, pdi);
 704	sdw_cdns_config_stream(cdns, ch, dir, pdi);
 705
 706	/* store pdi and hw_params, may be needed in prepare step */
 707	dai_runtime->paused = false;
 708	dai_runtime->suspended = false;
 709	dai_runtime->pdi = pdi;
 710
 711	/* Inform DSP about PDI stream number */
 712	ret = intel_params_stream(sdw, substream, dai, params,
 713				  sdw->instance,
 714				  pdi->intel_alh_id);
 715	if (ret)
 716		goto error;
 717
 718	sconfig.direction = dir;
 719	sconfig.ch_count = ch;
 720	sconfig.frame_rate = params_rate(params);
 721	sconfig.type = dai_runtime->stream_type;
 722
 723	sconfig.bps = snd_pcm_format_width(params_format(params));
 724
 725	/* Port configuration */
 726	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
 727	if (!pconfig) {
 728		ret =  -ENOMEM;
 729		goto error;
 730	}
 731
 732	pconfig->num = pdi->num;
 733	pconfig->ch_mask = (1 << ch) - 1;
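	/* e.g. ch = 2 yields ch_mask = 0x3, i.e. the two lowest channels */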
 734
 735	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
 736				    pconfig, 1, dai_runtime->stream);
 737	if (ret)
 738		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
 739
 740	kfree(pconfig);
 741error:
 742	return ret;
 743}
 744
 745static int intel_prepare(struct snd_pcm_substream *substream,
 746			 struct snd_soc_dai *dai)
 747{
 748	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 749	struct sdw_intel *sdw = cdns_to_intel(cdns);
 750	struct sdw_cdns_dai_runtime *dai_runtime;
 751	int ch, dir;
 752	int ret = 0;
 753
 754	dai_runtime = cdns->dai_runtime_array[dai->id];
 755	if (!dai_runtime) {
 756		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 757			__func__);
 758		return -EIO;
 759	}
 760
 761	if (dai_runtime->suspended) {
 762		struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
 763		struct snd_pcm_hw_params *hw_params;
 764
 765		hw_params = &rtd->dpcm[substream->stream].hw_params;
 766
 767		dai_runtime->suspended = false;
 768
 769		/*
 770		 * .prepare() is called after system resume, where we
 771		 * need to reinitialize the SHIM/ALH/Cadence IP.
 772		 * .prepare() is also called to deal with underflows,
 773		 * but in those cases we cannot touch ALH/SHIM
 774		 * registers
 775		 */
 776
 777		/* configure stream */
 778		ch = params_channels(hw_params);
 779		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 780			dir = SDW_DATA_DIR_RX;
 781		else
 782			dir = SDW_DATA_DIR_TX;
 783
 784		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
 785		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
 786		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
 787
 788		/* Inform DSP about PDI stream number */
 789		ret = intel_params_stream(sdw, substream, dai,
 790					  hw_params,
 791					  sdw->instance,
 792					  dai_runtime->pdi->intel_alh_id);
 793	}
 794
 795	return ret;
 796}
 797
 798static int
 799intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
 800{
 801	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 802	struct sdw_cdns_dai_runtime *dai_runtime;
 803	int ret;
 804
 805	dai_runtime = cdns->dai_runtime_array[dai->id];
 806	if (!dai_runtime)
 807		return -EIO;
 808
 809	/*
  810	 * The sdw stream state will transition to RELEASED when
  811	 * stream->master_list is empty. So the stream state will transition
  812	 * to DEPREPARED for the first cpu-dai and to RELEASED for the last
 813	 * cpu-dai.
 814	 */
 815	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
 816	if (ret < 0) {
 817		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
 818			dai_runtime->stream->name, ret);
 819		return ret;
 820	}
 821
 822	dai_runtime->pdi = NULL;
 823
 824	return 0;
 825}
 826
 827static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
 828				    void *stream, int direction)
 829{
 830	return cdns_set_sdw_stream(dai, stream, direction);
 831}
 832
 833static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
 834				  int direction)
 835{
 836	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 837	struct sdw_cdns_dai_runtime *dai_runtime;
 838
 839	dai_runtime = cdns->dai_runtime_array[dai->id];
 840	if (!dai_runtime)
 841		return ERR_PTR(-EINVAL);
 842
 843	return dai_runtime->stream;
 844}
 845
 846static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
 847{
 848	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 849	struct sdw_cdns_dai_runtime *dai_runtime;
 850	int ret = 0;
 851
 852	dai_runtime = cdns->dai_runtime_array[dai->id];
 853	if (!dai_runtime) {
 854		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 855			__func__);
 856		return -EIO;
 857	}
 858
 859	switch (cmd) {
 860	case SNDRV_PCM_TRIGGER_SUSPEND:
 861
  862		/*
  863		 * The .prepare callback is used to deal with xruns and resume operations.
  864		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
  865		 * but for resume operations the DMAs and SHIM registers need to be initialized.
  866		 * The .trigger callback is therefore used to track the suspend case only.
  867		 */
 868
 869		dai_runtime->suspended = true;
 870
 871		break;
 872
 873	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 874		dai_runtime->paused = true;
 875		break;
 876	case SNDRV_PCM_TRIGGER_STOP:
 877	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 878		dai_runtime->paused = false;
 879		break;
 880	default:
 881		break;
 882	}
 883
 884	return ret;
 885}
 886
 887static int intel_component_probe(struct snd_soc_component *component)
 888{
 889	int ret;
 890
  891	/*
  892	 * Make sure the device is pm_runtime_active before initiating
  893	 * bus transactions during the card registration.
  894	 * pm_runtime_resume() is used here instead of taking a reference
  895	 * and releasing it immediately.
  896	 */
 897	ret = pm_runtime_resume(component->dev);
 898	if (ret < 0 && ret != -EACCES)
 899		return ret;
 900
 901	return 0;
 902}
 903
 904static int intel_component_dais_suspend(struct snd_soc_component *component)
 905{
 906	struct snd_soc_dai *dai;
 907
 908	/*
  909	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
  910	 * does not generate a TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
 911	 * Since the component suspend is called last, we can trap this corner case
 912	 * and force the DAIs to release their resources.
 913	 */
 914	for_each_component_dais(component, dai) {
 915		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 916		struct sdw_cdns_dai_runtime *dai_runtime;
 917
 918		dai_runtime = cdns->dai_runtime_array[dai->id];
 919
 920		if (!dai_runtime)
 921			continue;
 922
 923		if (dai_runtime->suspended)
 924			continue;
 925
 926		if (dai_runtime->paused)
 927			dai_runtime->suspended = true;
 928	}
 929
 930	return 0;
 931}
 932
 933static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
 934	.hw_params = intel_hw_params,
 935	.prepare = intel_prepare,
 936	.hw_free = intel_hw_free,
 937	.trigger = intel_trigger,
 938	.set_stream = intel_pcm_set_sdw_stream,
 939	.get_stream = intel_get_sdw_stream,
 940};
 941
 942static const struct snd_soc_component_driver dai_component = {
 943	.name			= "soundwire",
 944	.probe			= intel_component_probe,
 945	.suspend		= intel_component_dais_suspend,
 946	.legacy_dai_naming	= 1,
 947};
 948
 949static int intel_create_dai(struct sdw_cdns *cdns,
 950			    struct snd_soc_dai_driver *dais,
 951			    enum intel_pdi_type type,
 952			    u32 num, u32 off, u32 max_ch)
 953{
 954	int i;
 955
 956	if (num == 0)
 957		return 0;
 958
  958
 959	for (i = off; i < (off + num); i++) {
 960		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
 961					      "SDW%d Pin%d",
 962					      cdns->instance, i);
 963		if (!dais[i].name)
 964			return -ENOMEM;
 965
 966		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
 967			dais[i].playback.channels_min = 1;
 968			dais[i].playback.channels_max = max_ch;
 969		}
 970
 971		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
 972			dais[i].capture.channels_min = 1;
 973			dais[i].capture.channels_max = max_ch;
 974		}
 975
 976		dais[i].ops = &intel_pcm_dai_ops;
 977	}
 978
 979	return 0;
 980}
 981
 982static int intel_register_dai(struct sdw_intel *sdw)
 983{
 984	struct sdw_cdns_dai_runtime **dai_runtime_array;
 985	struct sdw_cdns_stream_config config;
 986	struct sdw_cdns *cdns = &sdw->cdns;
 987	struct sdw_cdns_streams *stream;
 988	struct snd_soc_dai_driver *dais;
 989	int num_dai, ret, off = 0;
 990
 991	/* Read the PDI config and initialize cadence PDI */
 992	intel_pdi_init(sdw, &config);
 993	ret = sdw_cdns_pdi_init(cdns, config);
 994	if (ret)
 995		return ret;
 996
 997	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
 998
 999	/* DAIs are created based on total number of PDIs supported */
1000	num_dai = cdns->pcm.num_pdi;
1001
1002	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1003					 sizeof(struct sdw_cdns_dai_runtime *),
1004					 GFP_KERNEL);
1005	if (!dai_runtime_array)
1006		return -ENOMEM;
1007	cdns->dai_runtime_array = dai_runtime_array;
1008
1009	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1010	if (!dais)
1011		return -ENOMEM;
1012
1013	/* Create PCM DAIs */
1014	stream = &cdns->pcm;
1015
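	/*
	 * The DAI array is laid out with all input PDIs first, then the
	 * output PDIs, then the bidirectional PDIs, which is why 'off' is
	 * advanced by num_in and num_out between the calls below.
	 */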
1016	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1017			       off, stream->num_ch_in);
1018	if (ret)
1019		return ret;
1020
1021	off += cdns->pcm.num_in;
1022	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1023			       off, stream->num_ch_out);
1024	if (ret)
1025		return ret;
1026
1027	off += cdns->pcm.num_out;
1028	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1029			       off, stream->num_ch_bd);
1030	if (ret)
1031		return ret;
1032
1033	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1034					       dais, num_dai);
1035}
1036
1037
1038const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1039	.debugfs_init = intel_debugfs_init,
1040	.debugfs_exit = intel_debugfs_exit,
1041
1042	.register_dai = intel_register_dai,
1043
1044	.check_clock_stop = intel_check_clock_stop,
1045	.start_bus = intel_start_bus,
1046	.start_bus_after_reset = intel_start_bus_after_reset,
1047	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1048	.stop_bus = intel_stop_bus,
1049
1050	.link_power_up = intel_link_power_up,
1051	.link_power_down = intel_link_power_down,
1052
1053	.shim_check_wake = intel_shim_check_wake,
1054	.shim_wake = intel_shim_wake,
1055
1056	.pre_bank_switch = intel_pre_bank_switch,
1057	.post_bank_switch = intel_post_bank_switch,
1058
1059	.sync_arm = intel_shim_sync_arm,
1060	.sync_go_unlocked = intel_shim_sync_go_unlocked,
1061	.sync_go = intel_shim_sync_go,
1062	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
1063};
1064EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);
1065
v5.14.15
   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2// Copyright(c) 2015-17 Intel Corporation.
   3
   4/*
   5 * Soundwire Intel Master Driver
   6 */
   7
   8#include <linux/acpi.h>
   9#include <linux/debugfs.h>
  10#include <linux/delay.h>
  11#include <linux/module.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/auxiliary_bus.h>
  15#include <sound/pcm_params.h>
  16#include <linux/pm_runtime.h>
  17#include <sound/soc.h>
  18#include <linux/soundwire/sdw_registers.h>
  19#include <linux/soundwire/sdw.h>
  20#include <linux/soundwire/sdw_intel.h>
  21#include "cadence_master.h"
  22#include "bus.h"
  23#include "intel.h"
  24
  25#define INTEL_MASTER_SUSPEND_DELAY_MS	3000
  26
  27/*
  28 * debug/config flags for the Intel SoundWire Master.
  29 *
  30 * Since we may have multiple masters active, we can have up to 8
  31 * flags reused in each byte, with master0 using the ls-byte, etc.
  32 */
  33
  34#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME		BIT(0)
  35#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP		BIT(1)
  36#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE	BIT(2)
  37#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK		BIT(3)
  38
  39static int md_flags;
  40module_param_named(sdw_md_flags, md_flags, int, 0444);
  41MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
  42
  43/* Intel SHIM Registers Definition */
  44#define SDW_SHIM_LCAP			0x0
  45#define SDW_SHIM_LCTL			0x4
  46#define SDW_SHIM_IPPTR			0x8
  47#define SDW_SHIM_SYNC			0xC
  48
  49#define SDW_SHIM_CTLSCAP(x)		(0x010 + 0x60 * (x))
  50#define SDW_SHIM_CTLS0CM(x)		(0x012 + 0x60 * (x))
  51#define SDW_SHIM_CTLS1CM(x)		(0x014 + 0x60 * (x))
  52#define SDW_SHIM_CTLS2CM(x)		(0x016 + 0x60 * (x))
  53#define SDW_SHIM_CTLS3CM(x)		(0x018 + 0x60 * (x))
  54#define SDW_SHIM_PCMSCAP(x)		(0x020 + 0x60 * (x))
  55
  56#define SDW_SHIM_PCMSYCHM(x, y)		(0x022 + (0x60 * (x)) + (0x2 * (y)))
  57#define SDW_SHIM_PCMSYCHC(x, y)		(0x042 + (0x60 * (x)) + (0x2 * (y)))
  58#define SDW_SHIM_PDMSCAP(x)		(0x062 + 0x60 * (x))
  59#define SDW_SHIM_IOCTL(x)		(0x06C + 0x60 * (x))
  60#define SDW_SHIM_CTMCTL(x)		(0x06E + 0x60 * (x))
  61
  62#define SDW_SHIM_WAKEEN			0x190
  63#define SDW_SHIM_WAKESTS		0x192
  64
  65#define SDW_SHIM_LCTL_SPA		BIT(0)
  66#define SDW_SHIM_LCTL_SPA_MASK		GENMASK(3, 0)
  67#define SDW_SHIM_LCTL_CPA		BIT(8)
  68#define SDW_SHIM_LCTL_CPA_MASK		GENMASK(11, 8)
  69
  70#define SDW_SHIM_SYNC_SYNCPRD_VAL_24	(24000 / SDW_CADENCE_GSYNC_KHZ - 1)
  71#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4	(38400 / SDW_CADENCE_GSYNC_KHZ - 1)
  72#define SDW_SHIM_SYNC_SYNCPRD		GENMASK(14, 0)
  73#define SDW_SHIM_SYNC_SYNCCPU		BIT(15)
  74#define SDW_SHIM_SYNC_CMDSYNC_MASK	GENMASK(19, 16)
  75#define SDW_SHIM_SYNC_CMDSYNC		BIT(16)
  76#define SDW_SHIM_SYNC_SYNCGO		BIT(24)
  77
  78#define SDW_SHIM_PCMSCAP_ISS		GENMASK(3, 0)
  79#define SDW_SHIM_PCMSCAP_OSS		GENMASK(7, 4)
  80#define SDW_SHIM_PCMSCAP_BSS		GENMASK(12, 8)
  81
  82#define SDW_SHIM_PCMSYCM_LCHN		GENMASK(3, 0)
  83#define SDW_SHIM_PCMSYCM_HCHN		GENMASK(7, 4)
  84#define SDW_SHIM_PCMSYCM_STREAM		GENMASK(13, 8)
  85#define SDW_SHIM_PCMSYCM_DIR		BIT(15)
  86
  87#define SDW_SHIM_PDMSCAP_ISS		GENMASK(3, 0)
  88#define SDW_SHIM_PDMSCAP_OSS		GENMASK(7, 4)
  89#define SDW_SHIM_PDMSCAP_BSS		GENMASK(12, 8)
  90#define SDW_SHIM_PDMSCAP_CPSS		GENMASK(15, 13)
  91
  92#define SDW_SHIM_IOCTL_MIF		BIT(0)
  93#define SDW_SHIM_IOCTL_CO		BIT(1)
  94#define SDW_SHIM_IOCTL_COE		BIT(2)
  95#define SDW_SHIM_IOCTL_DO		BIT(3)
  96#define SDW_SHIM_IOCTL_DOE		BIT(4)
  97#define SDW_SHIM_IOCTL_BKE		BIT(5)
  98#define SDW_SHIM_IOCTL_WPDD		BIT(6)
  99#define SDW_SHIM_IOCTL_CIBD		BIT(8)
 100#define SDW_SHIM_IOCTL_DIBD		BIT(9)
 101
 102#define SDW_SHIM_CTMCTL_DACTQE		BIT(0)
 103#define SDW_SHIM_CTMCTL_DODS		BIT(1)
 104#define SDW_SHIM_CTMCTL_DOAIS		GENMASK(4, 3)
 105
 106#define SDW_SHIM_WAKEEN_ENABLE		BIT(0)
 107#define SDW_SHIM_WAKESTS_STATUS		BIT(0)
 108
 109/* Intel ALH Register definitions */
 110#define SDW_ALH_STRMZCFG(x)		(0x000 + (0x4 * (x)))
 111#define SDW_ALH_NUM_STREAMS		64
 112
 113#define SDW_ALH_STRMZCFG_DMAT_VAL	0x3
 114#define SDW_ALH_STRMZCFG_DMAT		GENMASK(7, 0)
 115#define SDW_ALH_STRMZCFG_CHN		GENMASK(19, 16)
 116
 117enum intel_pdi_type {
 118	INTEL_PDI_IN = 0,
 119	INTEL_PDI_OUT = 1,
 120	INTEL_PDI_BD = 2,
 121};
 122
 123#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
 124
 125/*
 126 * Read, write helpers for HW registers
 127 */
 128static inline int intel_readl(void __iomem *base, int offset)
 129{
 130	return readl(base + offset);
 131}
 132
 133static inline void intel_writel(void __iomem *base, int offset, int value)
 134{
 135	writel(value, base + offset);
 136}
 137
 138static inline u16 intel_readw(void __iomem *base, int offset)
 139{
 140	return readw(base + offset);
 141}
 142
 143static inline void intel_writew(void __iomem *base, int offset, u16 value)
 144{
 145	writew(value, base + offset);
 146}
 147
 148static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
 149{
 150	int timeout = 10;
 151	u32 reg_read;
 152
 153	do {
 154		reg_read = readl(base + offset);
 155		if ((reg_read & mask) == target)
 156			return 0;
 157
 158		timeout--;
 159		usleep_range(50, 100);
 160	} while (timeout != 0);
 161
 162	return -EAGAIN;
 163}
 164
 165static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
 166{
 167	writel(value, base + offset);
 168	return intel_wait_bit(base, offset, mask, 0);
 169}
 170
 171static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
 172{
 173	writel(value, base + offset);
 174	return intel_wait_bit(base, offset, mask, mask);
 175}
 176
 177/*
 178 * debugfs
 179 */
 180#ifdef CONFIG_DEBUG_FS
 181
 182#define RD_BUF (2 * PAGE_SIZE)
 183
 184static ssize_t intel_sprintf(void __iomem *mem, bool l,
 185			     char *buf, size_t pos, unsigned int reg)
 186{
 187	int value;
 188
 189	if (l)
 190		value = intel_readl(mem, reg);
 191	else
 192		value = intel_readw(mem, reg);
 193
 194	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
 195}
 196
 197static int intel_reg_show(struct seq_file *s_file, void *data)
 198{
 199	struct sdw_intel *sdw = s_file->private;
 200	void __iomem *s = sdw->link_res->shim;
 201	void __iomem *a = sdw->link_res->alh;
 202	char *buf;
 203	ssize_t ret;
 204	int i, j;
 205	unsigned int links, reg;
 206
 207	buf = kzalloc(RD_BUF, GFP_KERNEL);
 208	if (!buf)
 209		return -ENOMEM;
 210
 211	links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);
 212
 213	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
 214	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
 215
 216	for (i = 0; i < links; i++) {
 217		reg = SDW_SHIM_LCAP + i * 4;
 218		ret += intel_sprintf(s, true, buf, ret, reg);
 219	}
 220
 221	for (i = 0; i < links; i++) {
 222		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
 223		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
 224		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
 225		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
 226		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
 227		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
 228		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
 229
 230		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
 231
 232		/*
 233		 * the value 10 is the number of PDIs. We will need a
 234		 * cleanup to remove hard-coded Intel configurations
 235		 * from cadence_master.c
 236		 */
 237		for (j = 0; j < 10; j++) {
 238			ret += intel_sprintf(s, false, buf, ret,
 239					SDW_SHIM_PCMSYCHM(i, j));
 240			ret += intel_sprintf(s, false, buf, ret,
 241					SDW_SHIM_PCMSYCHC(i, j));
 242		}
 243		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");
 244
 245		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
 246		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
 247		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
 248	}
 249
 250	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
 251	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
 252	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
 253
 254	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
 255	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
 256		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
 257
 258	seq_printf(s_file, "%s", buf);
 259	kfree(buf);
 260
 261	return 0;
 262}
 263DEFINE_SHOW_ATTRIBUTE(intel_reg);
 264
 265static int intel_set_m_datamode(void *data, u64 value)
 266{
 267	struct sdw_intel *sdw = data;
 268	struct sdw_bus *bus = &sdw->cdns.bus;
 269
 270	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 271		return -EINVAL;
 272
 273	/* Userspace changed the hardware state behind the kernel's back */
 274	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 275
 276	bus->params.m_data_mode = value;
 277
 278	return 0;
 279}
 280DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
 281			 intel_set_m_datamode, "%llu\n");
 282
 283static int intel_set_s_datamode(void *data, u64 value)
 284{
 285	struct sdw_intel *sdw = data;
 286	struct sdw_bus *bus = &sdw->cdns.bus;
 287
 288	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 289		return -EINVAL;
 290
 291	/* Userspace changed the hardware state behind the kernel's back */
 292	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 293
 294	bus->params.s_data_mode = value;
 295
 296	return 0;
 297}
 298DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
 299			 intel_set_s_datamode, "%llu\n");
 300
 301static void intel_debugfs_init(struct sdw_intel *sdw)
 302{
 303	struct dentry *root = sdw->cdns.bus.debugfs;
 304
 305	if (!root)
 306		return;
 307
 308	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
 309
 310	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
 311			    &intel_reg_fops);
 312
 313	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
 314			    &intel_set_m_datamode_fops);
 315
 316	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
 317			    &intel_set_s_datamode_fops);
 318
 319	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
 320}
 321
 322static void intel_debugfs_exit(struct sdw_intel *sdw)
 323{
 324	debugfs_remove_recursive(sdw->debugfs);
 325}
 326#else
 327static void intel_debugfs_init(struct sdw_intel *sdw) {}
 328static void intel_debugfs_exit(struct sdw_intel *sdw) {}
 329#endif /* CONFIG_DEBUG_FS */
 330
 331/*
 332 * shim ops
 333 */
 334
 335static int intel_link_power_up(struct sdw_intel *sdw)
 336{
 337	unsigned int link_id = sdw->instance;
 338	void __iomem *shim = sdw->link_res->shim;
 339	u32 *shim_mask = sdw->link_res->shim_mask;
 340	struct sdw_bus *bus = &sdw->cdns.bus;
 341	struct sdw_master_prop *prop = &bus->prop;
 342	u32 spa_mask, cpa_mask;
 343	u32 link_control;
 344	int ret = 0;
 345	u32 syncprd;
 346	u32 sync_reg;
 347
 348	mutex_lock(sdw->link_res->shim_lock);
 349
 350	/*
 351	 * The hardware relies on an internal counter, typically 4kHz,
 352	 * to generate the SoundWire SSP - which defines a 'safe'
 353	 * synchronization point between commands and audio transport
 354	 * and allows for multi link synchronization. The SYNCPRD value
 355	 * is only dependent on the oscillator clock provided to
 356	 * the IP, so adjust based on _DSD properties reported in DSDT
 357	 * tables. The values reported are based on either 24MHz
 358	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
 359	 */
 360	if (prop->mclk_freq % 6000000)
 361		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
 362	else
 363		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
 364
 365	if (!*shim_mask) {
 366		dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
 367
 368		/* we first need to program the SyncPRD/CPU registers */
 369		dev_dbg(sdw->cdns.dev,
 370			"%s: first link up, programming SYNCPRD\n", __func__);
 371
 372		/* set SyncPRD period */
 373		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 374		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
 375
 376		/* Set SyncCPU bit */
 377		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
 378		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 379
 380		/* Link power up sequence */
 381		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 382
 383		/* only power-up enabled links */
 384		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
 385		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 386
 387		link_control |=  spa_mask;
 388
 389		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 390		if (ret < 0) {
 391			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
 392			goto out;
 393		}
 394
 395		/* SyncCPU will change once link is active */
 396		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
 397				     SDW_SHIM_SYNC_SYNCCPU, 0);
 398		if (ret < 0) {
 399			dev_err(sdw->cdns.dev,
 400				"Failed to set SHIM_SYNC: %d\n", ret);
 401			goto out;
 402		}
 403	}
 404
 405	*shim_mask |= BIT(link_id);
 406
 407	sdw->cdns.link_up = true;
 408out:
 409	mutex_unlock(sdw->link_res->shim_lock);
 410
 411	return ret;
 412}
 413
 414/* this needs to be called with shim_lock */
 415static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
 416{
 417	void __iomem *shim = sdw->link_res->shim;
 418	unsigned int link_id = sdw->instance;
 419	u16 ioctl;
 420
 421	/* Switch to MIP from Glue logic */
 422	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
 423
 424	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
 425	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 426	usleep_range(10, 15);
 427
 428	ioctl &= ~(SDW_SHIM_IOCTL_DO);
 429	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 430	usleep_range(10, 15);
 431
 432	ioctl |= (SDW_SHIM_IOCTL_MIF);
 433	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 434	usleep_range(10, 15);
 435
 436	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
 437	ioctl &= ~(SDW_SHIM_IOCTL_COE);
 438	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 439	usleep_range(10, 15);
 440
 441	/* at this point Master IP has full control of the I/Os */
 442}
 443
 444/* this needs to be called with shim_lock */
 445static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
 446{
 447	unsigned int link_id = sdw->instance;
 448	void __iomem *shim = sdw->link_res->shim;
 449	u16 ioctl;
 450
 451	/* Glue logic */
 452	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
 453	ioctl |= SDW_SHIM_IOCTL_BKE;
 454	ioctl |= SDW_SHIM_IOCTL_COE;
 455	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 456	usleep_range(10, 15);
 457
 458	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
 459	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 460	usleep_range(10, 15);
 461
 462	/* at this point Integration Glue has full control of the I/Os */
 463}
 464
 465static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
 
 466{
 467	void __iomem *shim = sdw->link_res->shim;
 468	unsigned int link_id = sdw->instance;
 469	int ret = 0;
 470	u16 ioctl = 0, act = 0;
 471
 472	mutex_lock(sdw->link_res->shim_lock);
 473
 474	/* Initialize Shim */
 475	ioctl |= SDW_SHIM_IOCTL_BKE;
 476	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 477	usleep_range(10, 15);
 478
 479	ioctl |= SDW_SHIM_IOCTL_WPDD;
 480	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 481	usleep_range(10, 15);
 482
 483	ioctl |= SDW_SHIM_IOCTL_DO;
 484	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 485	usleep_range(10, 15);
 486
 487	ioctl |= SDW_SHIM_IOCTL_DOE;
 488	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 489	usleep_range(10, 15);
 490
 491	intel_shim_glue_to_master_ip(sdw);
 492
 
 493	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
 494	act |= SDW_SHIM_CTMCTL_DACTQE;
 495	act |= SDW_SHIM_CTMCTL_DODS;
 496	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
 497	usleep_range(10, 15);
 
 
 
 
 
 
 498
 499	mutex_unlock(sdw->link_res->shim_lock);
 
 500
 501	return ret;
 502}
 503
 504static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
 505{
 506	void __iomem *shim = sdw->link_res->shim;
 507	unsigned int link_id = sdw->instance;
 508	u16 wake_en, wake_sts;
 509
 510	mutex_lock(sdw->link_res->shim_lock);
 511	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
 512
 513	if (wake_enable) {
 514		/* Enable the wakeup */
 515		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 516		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 517	} else {
 518		/* Disable the wake up interrupt */
 519		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
 520		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 521
 522		/* Clear wake status */
 523		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 524		wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 525		intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts);
 526	}
 527	mutex_unlock(sdw->link_res->shim_lock);
 528}
 529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 530static int intel_link_power_down(struct sdw_intel *sdw)
 531{
 532	u32 link_control, spa_mask, cpa_mask;
 533	unsigned int link_id = sdw->instance;
 534	void __iomem *shim = sdw->link_res->shim;
 535	u32 *shim_mask = sdw->link_res->shim_mask;
 536	int ret = 0;
 537
 538	mutex_lock(sdw->link_res->shim_lock);
 539
 540	if (!(*shim_mask & BIT(link_id)))
 541		dev_err(sdw->cdns.dev,
 542			"%s: Unbalanced power-up/down calls\n", __func__);
 543
 544	sdw->cdns.link_up = false;
 545
 546	intel_shim_master_ip_to_glue(sdw);
 547
 548	*shim_mask &= ~BIT(link_id);
 549
 550	if (!*shim_mask) {
 551
 552		dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
 553
 554		/* Link power down sequence */
 555		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 556
 557		/* only power-down enabled links */
 558		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
 559		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 560
 561		link_control &=  spa_mask;
 562
 563		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 564		if (ret < 0) {
 565			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
 566
 567			/*
 568			 * we leave the sdw->cdns.link_up flag as false since we've disabled
 569			 * the link at this point and cannot handle interrupts any longer.
 570			 */
 571		}
 572	}
 573
 574	mutex_unlock(sdw->link_res->shim_lock);
 575
 576	return ret;
 577}
 578
 579static void intel_shim_sync_arm(struct sdw_intel *sdw)
 580{
 581	void __iomem *shim = sdw->link_res->shim;
 582	u32 sync_reg;
 583
 584	mutex_lock(sdw->link_res->shim_lock);
 585
 586	/* update SYNC register */
 587	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 588	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
 589	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 590
 591	mutex_unlock(sdw->link_res->shim_lock);
 592}
 593
 594static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
 595{
 596	void __iomem *shim = sdw->link_res->shim;
 597	u32 sync_reg;
 598	int ret;
 599
 600	/* Read SYNC register */
 601	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 602
 603	/*
 604	 * Set SyncGO bit to synchronously trigger a bank switch for
 605	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
 606	 * the Masters.
 607	 */
 608	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
 609
 610	ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
 611			      SDW_SHIM_SYNC_SYNCGO);
 612
 613	if (ret < 0)
 614		dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
 615
 616	return ret;
 617}
 618
 619static int intel_shim_sync_go(struct sdw_intel *sdw)
 620{
 621	int ret;
 622
 623	mutex_lock(sdw->link_res->shim_lock);
 624
 625	ret = intel_shim_sync_go_unlocked(sdw);
 626
 627	mutex_unlock(sdw->link_res->shim_lock);
 628
 629	return ret;
 630}
 631
 632/*
 633 * PDI routines
 634 */
 635static void intel_pdi_init(struct sdw_intel *sdw,
 636			   struct sdw_cdns_stream_config *config)
 637{
 638	void __iomem *shim = sdw->link_res->shim;
 639	unsigned int link_id = sdw->instance;
 640	int pcm_cap, pdm_cap;
 641
 642	/* PCM Stream Capability */
 643	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
 644
 645	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
 646	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
 647	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
 648
 649	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
 650		config->pcm_bd, config->pcm_in, config->pcm_out);
 651
 652	/* PDM Stream Capability */
 653	pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
 654
 655	config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
 656	config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
 657	config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);
 658
 659	dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
 660		config->pdm_bd, config->pdm_in, config->pdm_out);
 661}
 662
 663static int
 664intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
 665{
 666	void __iomem *shim = sdw->link_res->shim;
 667	unsigned int link_id = sdw->instance;
 668	int count;
 669
 670	if (pcm) {
 671		count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
 672
 673		/*
 674		 * WORKAROUND: on all existing Intel controllers, pdi
 675		 * number 2 reports channel count as 1 even though it
 676		 * supports 8 channels. Performing hardcoding for pdi
 677		 * number 2.
 678		 */
 679		if (pdi_num == 2)
 680			count = 7;
 681
 682	} else {
 683		count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
 684		count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
 685	}
 686
 687	/* zero based values for channel count in register */
 688	count++;
 689
 690	return count;
 691}
 692
 693static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
 694				   struct sdw_cdns_pdi *pdi,
 695				   unsigned int num_pdi,
 696				   unsigned int *num_ch, bool pcm)
 697{
 698	int i, ch_count = 0;
 699
 700	for (i = 0; i < num_pdi; i++) {
 701		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm);
 702		ch_count += pdi->ch_count;
 703		pdi++;
 704	}
 705
 706	*num_ch = ch_count;
 707	return 0;
 708}
 709
 710static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
 711				      struct sdw_cdns_streams *stream, bool pcm)
 712{
 713	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
 714				&stream->num_ch_bd, pcm);
 715
 716	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
 717				&stream->num_ch_in, pcm);
 718
 719	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
 720				&stream->num_ch_out, pcm);
 721
 722	return 0;
 723}
 724
 725static int intel_pdi_ch_update(struct sdw_intel *sdw)
 726{
 727	/* First update PCM streams followed by PDM streams */
 728	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true);
 729	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false);
 730
 731	return 0;
 732}
 733
 734static void
 735intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 736{
 737	void __iomem *shim = sdw->link_res->shim;
 738	unsigned int link_id = sdw->instance;
 739	int pdi_conf = 0;
 740
 741	/* the Bulk and PCM streams are not contiguous */
 742	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 743	if (pdi->num >= 2)
 744		pdi->intel_alh_id += 2;
 745
 746	/*
 747	 * Program stream parameters to stream SHIM register
 748	 * This is applicable for PCM stream only.
 749	 */
 750	if (pdi->type != SDW_STREAM_PCM)
 751		return;
 752
 753	if (pdi->dir == SDW_DATA_DIR_RX)
 754		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
 755	else
 756		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
 757
 758	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
 759	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
 760	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
 761
 762	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
 763}
 764
 765static void
 766intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 767{
 768	void __iomem *alh = sdw->link_res->alh;
 769	unsigned int link_id = sdw->instance;
 770	unsigned int conf;
 771
 772	/* the Bulk and PCM streams are not contiguous */
 773	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 774	if (pdi->num >= 2)
 775		pdi->intel_alh_id += 2;
 776
 777	/* Program Stream config ALH register */
 778	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
 779
 780	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
 781	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
 782
 783	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
 784}
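
/*
 * Worked example of the ALH stream mapping used in the two helpers above:
 * each link owns a window of 16 ALH stream IDs starting at an offset of 3,
 * and PDIs 0 and 1 (which, per the "not contiguous" comments, are set
 * aside for Bulk transfers) leave a 2-entry gap before the remaining PDIs.
 * So on link 0, PDI 0 maps to ALH stream 3 and PDI 2 maps to stream 7;
 * on link 1, PDI 2 maps to (1 * 16) + 2 + 3 + 2 = 23.
 */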
 785
 786static int intel_params_stream(struct sdw_intel *sdw,
 787			       struct snd_pcm_substream *substream,
 788			       struct snd_soc_dai *dai,
 789			       struct snd_pcm_hw_params *hw_params,
 790			       int link_id, int alh_stream_id)
 791{
 792	struct sdw_intel_link_res *res = sdw->link_res;
 793	struct sdw_intel_stream_params_data params_data;
 794
 795	params_data.substream = substream;
 796	params_data.dai = dai;
 797	params_data.hw_params = hw_params;
 798	params_data.link_id = link_id;
 799	params_data.alh_stream_id = alh_stream_id;
 800
 801	if (res->ops && res->ops->params_stream && res->dev)
 802		return res->ops->params_stream(res->dev,
 803					       &params_data);
 804	return -EIO;
 805}
 806
 807static int intel_free_stream(struct sdw_intel *sdw,
 808			     struct snd_pcm_substream *substream,
 809			     struct snd_soc_dai *dai,
 810			     int link_id)
 811{
 812	struct sdw_intel_link_res *res = sdw->link_res;
 813	struct sdw_intel_stream_free_data free_data;
 814
 815	free_data.substream = substream;
 816	free_data.dai = dai;
 817	free_data.link_id = link_id;
 818
 819	if (res->ops && res->ops->free_stream && res->dev)
 820		return res->ops->free_stream(res->dev,
 821					     &free_data);
 822
 823	return 0;
 824}
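
/*
 * Neither helper above programs hardware directly: they forward the stream
 * information to the parent driver (typically the SOF driver that owns the
 * DSP) through the callbacks registered in the shared link resources.
 */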
 825
 826/*
 827 * bank switch routines
 828 */
 829
 830static int intel_pre_bank_switch(struct sdw_bus *bus)
 831{
 832	struct sdw_cdns *cdns = bus_to_cdns(bus);
 833	struct sdw_intel *sdw = cdns_to_intel(cdns);
 834
 835	/* Write to register only for multi-link */
 836	if (!bus->multi_link)
 837		return 0;
 838
 839	intel_shim_sync_arm(sdw);
 840
 841	return 0;
 842}
 843
 844static int intel_post_bank_switch(struct sdw_bus *bus)
 845{
 846	struct sdw_cdns *cdns = bus_to_cdns(bus);
 847	struct sdw_intel *sdw = cdns_to_intel(cdns);
 848	void __iomem *shim = sdw->link_res->shim;
 849	int sync_reg, ret;
 850
 851	/* Write to register only for multi-link */
 852	if (!bus->multi_link)
 853		return 0;
 854
 855	mutex_lock(sdw->link_res->shim_lock);
 856
 857	/* Read SYNC register */
 858	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 859
 860	/*
 861	 * The post_bank_switch() op is called by the bus in a loop for
 862	 * all the Masters in the stream, with the expectation that
 863	 * we trigger the bank switch only for the first Master in the list
 864	 * and do nothing for the other Masters.
 865	 *
 866	 * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
 867	 */
 868	if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
 869		ret = 0;
 870		goto unlock;
 871	}
 872
 873	ret = intel_shim_sync_go_unlocked(sdw);
 874unlock:
 875	mutex_unlock(sdw->link_res->shim_lock);
 876
 877	if (ret < 0)
 878		dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
 879
 880	return ret;
 881}
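
/*
 * Summary of the multi-link handshake: pre_bank_switch() arms CMDSYNC for
 * each Master involved in the stream, and the first post_bank_switch()
 * call writes SYNCGO once, which switches banks on all armed links and
 * clears their CMDSYNC bits. Later calls in the same loop see CMDSYNC
 * cleared and return without touching the SYNC register.
 */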
 882
 883/*
 884 * DAI routines
 885 */
 886
 887static int intel_startup(struct snd_pcm_substream *substream,
 888			 struct snd_soc_dai *dai)
 889{
 890	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 891	int ret;
 892
 893	ret = pm_runtime_get_sync(cdns->dev);
 894	if (ret < 0 && ret != -EACCES) {
 895		dev_err_ratelimited(cdns->dev,
 896				    "pm_runtime_get_sync failed in %s, ret %d\n",
 897				    __func__, ret);
 898		pm_runtime_put_noidle(cdns->dev);
 899		return ret;
 900	}
 901	return 0;
 902}
 903
 904static int intel_hw_params(struct snd_pcm_substream *substream,
 905			   struct snd_pcm_hw_params *params,
 906			   struct snd_soc_dai *dai)
 907{
 908	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 909	struct sdw_intel *sdw = cdns_to_intel(cdns);
 910	struct sdw_cdns_dma_data *dma;
 911	struct sdw_cdns_pdi *pdi;
 912	struct sdw_stream_config sconfig;
 913	struct sdw_port_config *pconfig;
 914	int ch, dir;
 915	int ret;
 916	bool pcm = true;
 917
 918	dma = snd_soc_dai_get_dma_data(dai, substream);
 919	if (!dma)
 920		return -EIO;
 921
 922	ch = params_channels(params);
 923	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 924		dir = SDW_DATA_DIR_RX;
 925	else
 926		dir = SDW_DATA_DIR_TX;
 927
 928	if (dma->stream_type == SDW_STREAM_PDM)
 929		pcm = false;
 930
 931	if (pcm)
 932		pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
 933	else
 934		pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
 935
 936	if (!pdi) {
 937		ret = -EINVAL;
 938		goto error;
 939	}
 940
 941	/* do run-time configurations for SHIM, ALH and PDI/PORT */
 942	intel_pdi_shim_configure(sdw, pdi);
 943	intel_pdi_alh_configure(sdw, pdi);
 944	sdw_cdns_config_stream(cdns, ch, dir, pdi);
 945
 946	/* store pdi and hw_params, may be needed in prepare step */
 947	dma->suspended = false;
 948	dma->pdi = pdi;
 949	dma->hw_params = params;
 950
 951	/* Inform DSP about PDI stream number */
 952	ret = intel_params_stream(sdw, substream, dai, params,
 953				  sdw->instance,
 954				  pdi->intel_alh_id);
 955	if (ret)
 956		goto error;
 957
 958	sconfig.direction = dir;
 959	sconfig.ch_count = ch;
 960	sconfig.frame_rate = params_rate(params);
 961	sconfig.type = dma->stream_type;
 962
 963	if (dma->stream_type == SDW_STREAM_PDM) {
 964		sconfig.frame_rate *= 50;
 965		sconfig.bps = 1;
 966	} else {
 967		sconfig.bps = snd_pcm_format_width(params_format(params));
 968	}
 969
 970	/* Port configuration */
 971	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
 972	if (!pconfig) {
 973		ret = -ENOMEM;
 974		goto error;
 975	}
 976
 977	pconfig->num = pdi->num;
 978	pconfig->ch_mask = (1 << ch) - 1;
 979
 980	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
 981				    pconfig, 1, dma->stream);
 982	if (ret)
 983		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
 984
 985	kfree(pconfig);
 986error:
 987	return ret;
 988}
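
/*
 * hw_params flow in short: allocate a Cadence PDI matching the direction
 * and channel count, program the SHIM and ALH registers for that PDI,
 * inform the DSP firmware of the ALH stream ID through the parent driver,
 * and finally register the port configuration with the bus core via
 * sdw_stream_add_master().
 */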
 989
 990static int intel_prepare(struct snd_pcm_substream *substream,
 991			 struct snd_soc_dai *dai)
 992{
 993	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 994	struct sdw_intel *sdw = cdns_to_intel(cdns);
 995	struct sdw_cdns_dma_data *dma;
 996	int ch, dir;
 997	int ret = 0;
 998
 999	dma = snd_soc_dai_get_dma_data(dai, substream);
1000	if (!dma) {
1001		dev_err(dai->dev, "failed to get dma data in %s\n",
1002			__func__);
1003		return -EIO;
1004	}
1005
1006	if (dma->suspended) {
1007		dma->suspended = false;
1008
1009		/*
1010		 * .prepare() is called after system resume, where we
1011		 * need to reinitialize the SHIM/ALH/Cadence IP.
1012		 * .prepare() is also called to deal with underflows,
1013		 * but in those cases we cannot touch ALH/SHIM
1014		 * registers
1015		 */
1016
1017		/* configure stream */
1018		ch = params_channels(dma->hw_params);
1019		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
1020			dir = SDW_DATA_DIR_RX;
1021		else
1022			dir = SDW_DATA_DIR_TX;
1023
1024		intel_pdi_shim_configure(sdw, dma->pdi);
1025		intel_pdi_alh_configure(sdw, dma->pdi);
1026		sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
1027
1028		/* Inform DSP about PDI stream number */
1029		ret = intel_params_stream(sdw, substream, dai,
1030					  dma->hw_params,
1031					  sdw->instance,
1032					  dma->pdi->intel_alh_id);
1033	}
1034
1035	return ret;
1036}
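
/*
 * dma->suspended is set by intel_component_dais_suspend() during a system
 * suspend; the SHIM/ALH/PDI programming is redone here because those
 * registers are not preserved across the link power-down. For a plain
 * underflow recovery the flag stays false and prepare() does nothing.
 */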
1037
1038static int
1039intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
1040{
1041	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1042	struct sdw_intel *sdw = cdns_to_intel(cdns);
1043	struct sdw_cdns_dma_data *dma;
1044	int ret;
1045
1046	dma = snd_soc_dai_get_dma_data(dai, substream);
1047	if (!dma)
1048		return -EIO;
1049
1050	/*
1051	 * The sdw stream state will transition to RELEASED when stream->
1052	 * master_list is empty. So the stream state will transition to
1053	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
1054	 * cpu-dai.
1055	 */
1056	ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
1057	if (ret < 0) {
1058		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
1059			dma->stream->name, ret);
1060		return ret;
1061	}
1062
1063	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
1064	if (ret < 0) {
1065		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
1066		return ret;
1067	}
1068
1069	dma->hw_params = NULL;
1070	dma->pdi = NULL;
1071
1072	return 0;
1073}
1074
1075static void intel_shutdown(struct snd_pcm_substream *substream,
1076			   struct snd_soc_dai *dai)
1077{
1078	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1079
1080	pm_runtime_mark_last_busy(cdns->dev);
1081	pm_runtime_put_autosuspend(cdns->dev);
1082}
1083
1084static int intel_component_dais_suspend(struct snd_soc_component *component)
1085{
1086	struct sdw_cdns_dma_data *dma;
1087	struct snd_soc_dai *dai;
1088
1089	for_each_component_dais(component, dai) {
1090		/*
1091		 * we don't have a .suspend dai_ops, and we don't have access
1092		 * to the substream, so let's mark both capture and playback
1093		 * DMA contexts as suspended
1094		 */
1095		dma = dai->playback_dma_data;
1096		if (dma)
1097			dma->suspended = true;
1098
1099		dma = dai->capture_dma_data;
1100		if (dma)
1101			dma->suspended = true;
1102	}
1103
1104	return 0;
1105}
1106
1107static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
1108				    void *stream, int direction)
1109{
1110	return cdns_set_sdw_stream(dai, stream, true, direction);
1111}
1112
1113static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
1114				    void *stream, int direction)
1115{
1116	return cdns_set_sdw_stream(dai, stream, false, direction);
1117}
1118
1119static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
1120				  int direction)
1121{
1122	struct sdw_cdns_dma_data *dma;
1123
1124	if (direction == SNDRV_PCM_STREAM_PLAYBACK)
1125		dma = dai->playback_dma_data;
1126	else
1127		dma = dai->capture_dma_data;
1128
1129	if (!dma)
1130		return ERR_PTR(-EINVAL);
1131
1132	return dma->stream;
1133}
1134
1135static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1136	.startup = intel_startup,
1137	.hw_params = intel_hw_params,
1138	.prepare = intel_prepare,
1139	.hw_free = intel_hw_free,
1140	.shutdown = intel_shutdown,
1141	.set_sdw_stream = intel_pcm_set_sdw_stream,
1142	.get_sdw_stream = intel_get_sdw_stream,
1143};
1144
1145static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
1146	.startup = intel_startup,
1147	.hw_params = intel_hw_params,
1148	.prepare = intel_prepare,
1149	.hw_free = intel_hw_free,
1150	.shutdown = intel_shutdown,
1151	.set_sdw_stream = intel_pdm_set_sdw_stream,
1152	.get_sdw_stream = intel_get_sdw_stream,
1153};
1154
1155static const struct snd_soc_component_driver dai_component = {
1156	.name           = "soundwire",
1157	.suspend	= intel_component_dais_suspend,
1158};
1159
1160static int intel_create_dai(struct sdw_cdns *cdns,
1161			    struct snd_soc_dai_driver *dais,
1162			    enum intel_pdi_type type,
1163			    u32 num, u32 off, u32 max_ch, bool pcm)
1164{
1165	int i;
1166
1167	if (num == 0)
1168		return 0;
1169
1170	 /* TODO: Read supported rates/formats from hardware */
1171	for (i = off; i < (off + num); i++) {
1172		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1173					      "SDW%d Pin%d",
1174					      cdns->instance, i);
1175		if (!dais[i].name)
1176			return -ENOMEM;
1177
1178		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1179			dais[i].playback.channels_min = 1;
1180			dais[i].playback.channels_max = max_ch;
1181			dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1182			dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1183		}
1184
1185		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1186			dais[i].capture.channels_min = 1;
1187			dais[i].capture.channels_max = max_ch;
1188			dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1189			dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1190		}
1191
1192		if (pcm)
1193			dais[i].ops = &intel_pcm_dai_ops;
1194		else
1195			dais[i].ops = &intel_pdm_dai_ops;
1196	}
1197
1198	return 0;
1199}
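
/*
 * Note: until the TODO above is addressed, every DAI advertises a fixed
 * 48 kHz / S16_LE capability; the only per-DAI differences are the name
 * ("SDW<instance> Pin<index>"), the supported directions and the channel
 * maximum.
 */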
1200
1201static int intel_register_dai(struct sdw_intel *sdw)
1202{
1203	struct sdw_cdns *cdns = &sdw->cdns;
1204	struct sdw_cdns_streams *stream;
1205	struct snd_soc_dai_driver *dais;
1206	int num_dai, ret, off = 0;
1207
1208	/* DAIs are created based on total number of PDIs supported */
1209	num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;
1210
1211	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1212	if (!dais)
1213		return -ENOMEM;
1214
1215	/* Create PCM DAIs */
1216	stream = &cdns->pcm;
1217
1218	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1219			       off, stream->num_ch_in, true);
1220	if (ret)
1221		return ret;
1222
1223	off += cdns->pcm.num_in;
1224	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1225			       off, stream->num_ch_out, true);
1226	if (ret)
1227		return ret;
1228
1229	off += cdns->pcm.num_out;
1230	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1231			       off, stream->num_ch_bd, true);
1232	if (ret)
1233		return ret;
1234
1235	/* Create PDM DAIs */
1236	stream = &cdns->pdm;
1237	off += cdns->pcm.num_bd;
1238	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
1239			       off, stream->num_ch_in, false);
1240	if (ret)
1241		return ret;
1242
1243	off += cdns->pdm.num_in;
1244	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
1245			       off, stream->num_ch_out, false);
1246	if (ret)
1247		return ret;
1248
1249	off += cdns->pdm.num_out;
1250	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
1251			       off, stream->num_ch_bd, false);
1252	if (ret)
1253		return ret;
1254
1255	return snd_soc_register_component(cdns->dev, &dai_component,
1256					  dais, num_dai);
1257}
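
/*
 * The DAI array is laid out in a fixed order: PCM in, PCM out, PCM
 * bidirectional, then the same three groups for PDM; the 'off' counter
 * accumulates across groups so that every DAI gets a unique index and
 * name.
 */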
1258
1259static int sdw_master_read_intel_prop(struct sdw_bus *bus)
1260{
1261	struct sdw_master_prop *prop = &bus->prop;
1262	struct fwnode_handle *link;
1263	char name[32];
1264	u32 quirk_mask;
1265
1266	/* Find master handle */
1267	snprintf(name, sizeof(name),
1268		 "mipi-sdw-link-%d-subproperties", bus->link_id);
1269
1270	link = device_get_named_child_node(bus->dev, name);
1271	if (!link) {
1272		dev_err(bus->dev, "Master node %s not found\n", name);
1273		return -EIO;
1274	}
1275
1276	fwnode_property_read_u32(link,
1277				 "intel-sdw-ip-clock",
1278				 &prop->mclk_freq);
1279
1280	/* the values reported by BIOS are the 2x clock, not the bus clock */
1281	prop->mclk_freq /= 2;
1282
1283	fwnode_property_read_u32(link,
1284				 "intel-quirk-mask",
1285				 &quirk_mask);
1286
1287	if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
1288		prop->hw_disabled = true;
1289
1290	prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
1291		SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
1292
1293	return 0;
1294}
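
/*
 * Example of the clock adjustment above (with a hypothetical BIOS value):
 * if "intel-sdw-ip-clock" reports 9.6 MHz, mclk_freq is stored as 4.8 MHz,
 * since the firmware describes the doubled IP clock rather than the bus
 * clock itself.
 */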
1295
1296static int intel_prop_read(struct sdw_bus *bus)
1297{
1298	/* Initialize with default handler to read all DisCo properties */
1299	sdw_master_read_prop(bus);
1300
1301	/* read Intel-specific properties */
1302	sdw_master_read_intel_prop(bus);
1303
1304	return 0;
1305}
1306
1307static struct sdw_master_ops sdw_intel_ops = {
1308	.read_prop = sdw_master_read_prop,
1309	.override_adr = sdw_dmi_override_adr,
1310	.xfer_msg = cdns_xfer_msg,
1311	.xfer_msg_defer = cdns_xfer_msg_defer,
1312	.reset_page_addr = cdns_reset_page_addr,
1313	.set_bus_conf = cdns_bus_conf,
1314	.pre_bank_switch = intel_pre_bank_switch,
1315	.post_bank_switch = intel_post_bank_switch,
1316};
1317
1318static int intel_init(struct sdw_intel *sdw)
1319{
1320	bool clock_stop;
1321
1322	/* Initialize shim and controller */
1323	intel_link_power_up(sdw);
1324
1325	clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
1326
1327	intel_shim_init(sdw, clock_stop);
1328
1329	return 0;
1330}
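
/*
 * intel_init() is the common (re)initialization entry point used by both
 * the startup and the resume paths: it powers the link up through the SHIM
 * and then programs the SHIM according to whether the Cadence IP is
 * already in clock-stop mode.
 */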
1331
1332/*
1333 * probe and init (aux_dev_id argument is required by function prototype but not used)
1334 */
1335static int intel_link_probe(struct auxiliary_device *auxdev,
1336			    const struct auxiliary_device_id *aux_dev_id)
1337
1338{
1339	struct device *dev = &auxdev->dev;
1340	struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
1341	struct sdw_intel *sdw;
1342	struct sdw_cdns *cdns;
1343	struct sdw_bus *bus;
1344	int ret;
1345
1346	sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
1347	if (!sdw)
1348		return -ENOMEM;
1349
1350	cdns = &sdw->cdns;
1351	bus = &cdns->bus;
1352
1353	sdw->instance = auxdev->id;
1354	sdw->link_res = &ldev->link_res;
1355	cdns->dev = dev;
1356	cdns->registers = sdw->link_res->registers;
1357	cdns->instance = sdw->instance;
1358	cdns->msg_count = 0;
1359
1360	bus->link_id = auxdev->id;
1361
1362	sdw_cdns_probe(cdns);
1363
1364	/* Set property read ops */
1365	sdw_intel_ops.read_prop = intel_prop_read;
1366	bus->ops = &sdw_intel_ops;
1367
1368	/* set driver data, accessed by snd_soc_dai_get_drvdata() */
1369	dev_set_drvdata(dev, cdns);
1370
1371	/* use generic bandwidth allocation algorithm */
1372	sdw->cdns.bus.compute_params = sdw_compute_params;
1373
1374	ret = sdw_bus_master_add(bus, dev, dev->fwnode);
1375	if (ret) {
1376		dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
1377		return ret;
1378	}
1379
1380	if (bus->prop.hw_disabled)
1381		dev_info(dev,
1382			 "SoundWire master %d is disabled, will be ignored\n",
1383			 bus->link_id);
1384	/*
1385	 * Ignore BIOS err_threshold, it's a really bad idea when dealing
1386	 * with multiple hardware synchronized links
1387	 */
1388	bus->prop.err_threshold = 0;
1389
1390	return 0;
1391}
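
/*
 * Note: probe only allocates the context and registers the bus master;
 * the hardware is not touched until intel_link_startup() below, which the
 * parent driver calls separately. That is also where the per-link byte of
 * md_flags is evaluated.
 */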
1392
1393int intel_link_startup(struct auxiliary_device *auxdev)
1394{
1395	struct sdw_cdns_stream_config config;
1396	struct device *dev = &auxdev->dev;
1397	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1398	struct sdw_intel *sdw = cdns_to_intel(cdns);
1399	struct sdw_bus *bus = &cdns->bus;
1400	int link_flags;
1401	bool multi_link;
1402	u32 clock_stop_quirks;
1403	int ret;
1404
1405	if (bus->prop.hw_disabled) {
1406		dev_info(dev,
1407			 "SoundWire master %d is disabled, ignoring\n",
1408			 sdw->instance);
1409		return 0;
1410	}
1411
1412	link_flags = md_flags >> (bus->link_id * 8);
1413	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1414	if (!multi_link) {
1415		dev_dbg(dev, "Multi-link is disabled\n");
1416		bus->multi_link = false;
1417	} else {
1418		/*
1419		 * hardware-based synchronization is required regardless
1420		 * of the number of segments used by a stream: SSP-based
1421		 * synchronization is gated by gsync when the multi-master
1422		 * mode is set.
1423		 */
1424		bus->multi_link = true;
1425		bus->hw_sync_min_links = 1;
1426	}
1427
1428	/* Initialize shim, controller */
1429	ret = intel_init(sdw);
1430	if (ret)
1431		goto err_init;
1432
1433	/* Read the PDI config and initialize cadence PDI */
1434	intel_pdi_init(sdw, &config);
1435	ret = sdw_cdns_pdi_init(cdns, config);
1436	if (ret)
1437		goto err_init;
1438
1439	intel_pdi_ch_update(sdw);
1440
1441	ret = sdw_cdns_enable_interrupt(cdns, true);
1442	if (ret < 0) {
1443		dev_err(dev, "cannot enable interrupts\n");
1444		goto err_init;
1445	}
1446
1447	/*
1448	 * follow recommended programming flows to avoid timeouts when
1449	 * gsync is enabled
1450	 */
1451	if (multi_link)
1452		intel_shim_sync_arm(sdw);
1453
1454	ret = sdw_cdns_init(cdns);
1455	if (ret < 0) {
1456		dev_err(dev, "unable to initialize Cadence IP\n");
1457		goto err_interrupt;
1458	}
1459
1460	ret = sdw_cdns_exit_reset(cdns);
1461	if (ret < 0) {
1462		dev_err(dev, "unable to exit bus reset sequence\n");
1463		goto err_interrupt;
1464	}
1465
1466	if (multi_link) {
1467		ret = intel_shim_sync_go(sdw);
1468		if (ret < 0) {
1469			dev_err(dev, "sync go failed: %d\n", ret);
1470			goto err_interrupt;
1471		}
1472	}
1473
1474	/* Register DAIs */
1475	ret = intel_register_dai(sdw);
1476	if (ret) {
1477		dev_err(dev, "DAI registration failed: %d\n", ret);
1478		snd_soc_unregister_component(dev);
1479		goto err_interrupt;
1480	}
1481
1482	intel_debugfs_init(sdw);
1483
1484	/* Enable runtime PM */
1485	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
1486		pm_runtime_set_autosuspend_delay(dev,
1487						 INTEL_MASTER_SUSPEND_DELAY_MS);
1488		pm_runtime_use_autosuspend(dev);
1489		pm_runtime_mark_last_busy(dev);
1490
1491		pm_runtime_set_active(dev);
1492		pm_runtime_enable(dev);
1493	}
1494
1495	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1496	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
1497		/*
1498		 * To keep the clock running we need to prevent
1499		 * pm_runtime suspend from happening by increasing the
1500		 * reference count.
1501		 * This quirk is specified by the parent PCI device in
1502		 * case of specific latency requirements. It will have
1503		 * no effect if pm_runtime is disabled by the user via
1504		 * a module parameter for testing purposes.
1505		 */
1506		pm_runtime_get_noresume(dev);
1507	}
1508
1509	/*
1510	 * The runtime PM status of Slave devices is "Unsupported"
1511	 * until they report as ATTACHED. If they don't, e.g. because
1512	 * there are no Slave devices populated or if the power-on is
1513	 * delayed or dependent on a power switch, the Master will
1514	 * remain active and prevent its parent from suspending.
1515	 *
1516	 * Conditionally force the pm_runtime core to re-evaluate the
1517	 * Master status in the absence of any Slave activity. A quirk
1518	 * is provided to e.g. deal with Slaves that may be powered on
1519	 * with a delay. A more complete solution would require the
1520	 * definition of Master properties.
1521	 */
1522	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1523		pm_runtime_idle(dev);
1524
1525	return 0;
1526
1527err_interrupt:
1528	sdw_cdns_enable_interrupt(cdns, false);
1529err_init:
1530	return ret;
1531}
1532
1533static void intel_link_remove(struct auxiliary_device *auxdev)
1534{
1535	struct device *dev = &auxdev->dev;
1536	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1537	struct sdw_intel *sdw = cdns_to_intel(cdns);
1538	struct sdw_bus *bus = &cdns->bus;
1539
1540	/*
1541	 * Since pm_runtime is already disabled, we don't decrease
1542	 * the refcount when the clock_stop_quirk is
1543	 * SDW_INTEL_CLK_STOP_NOT_ALLOWED
1544	 */
1545	if (!bus->prop.hw_disabled) {
1546		intel_debugfs_exit(sdw);
1547		sdw_cdns_enable_interrupt(cdns, false);
1548		snd_soc_unregister_component(dev);
1549	}
1550	sdw_bus_master_delete(bus);
1551}
1552
1553int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
1554{
1555	struct device *dev = &auxdev->dev;
1556	struct sdw_intel *sdw;
1557	struct sdw_bus *bus;
1558	void __iomem *shim;
1559	u16 wake_sts;
1560
1561	sdw = dev_get_drvdata(dev);
1562	bus = &sdw->cdns.bus;
1563
1564	if (bus->prop.hw_disabled) {
1565		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n", bus->link_id);
1566		return 0;
1567	}
1568
1569	shim = sdw->link_res->shim;
1570	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
1571
1572	if (!(wake_sts & BIT(sdw->instance)))
1573		return 0;
1574
1575	/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
1576	intel_shim_wake(sdw, false);
1577
1578	/*
1579	 * resume the Master, which will generate a bus reset and result in
1580	 * Slaves re-attaching and being re-enumerated. The SoundWire physical
1581	 * device which generated the wake will trigger an interrupt, which
1582	 * will in turn cause the corresponding Linux Slave device to be
1583	 * resumed and the Slave codec driver to check the status.
1584	 */
1585	pm_request_resume(dev);
1586
1587	return 0;
1588}
1589
1590/*
1591 * PM calls
1592 */
1593
1594static int __maybe_unused intel_suspend(struct device *dev)
1595{
1596	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1597	struct sdw_intel *sdw = cdns_to_intel(cdns);
1598	struct sdw_bus *bus = &cdns->bus;
1599	u32 clock_stop_quirks;
1600	int ret;
1601
1602	if (bus->prop.hw_disabled) {
1603		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1604			bus->link_id);
1605		return 0;
1606	}
1607
1608	if (pm_runtime_suspended(dev)) {
1609		dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
1610
1611		clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1612
1613		if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
1614		     !clock_stop_quirks) &&
1615		    !pm_runtime_suspended(dev->parent)) {
1616
1617			/*
1618			 * if we've enabled clock stop, and the parent
1619			 * is still active, disable shim wake. The
1620			 * SHIM registers are not accessible if the
1621			 * parent is already pm_runtime suspended so
1622			 * it's too late to change that configuration
1623			 */
1624
1625			intel_shim_wake(sdw, false);
1626		}
1627
1628		return 0;
1629	}
1630
1631	ret = sdw_cdns_enable_interrupt(cdns, false);
1632	if (ret < 0) {
1633		dev_err(dev, "cannot disable interrupts on suspend\n");
1634		return ret;
1635	}
1636
1637	ret = intel_link_power_down(sdw);
1638	if (ret) {
1639		dev_err(dev, "Link power down failed: %d\n", ret);
1640		return ret;
1641	}
1642
1643	intel_shim_wake(sdw, false);
1644
1645	return 0;
1646}
1647
1648static int __maybe_unused intel_suspend_runtime(struct device *dev)
1649{
1650	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1651	struct sdw_intel *sdw = cdns_to_intel(cdns);
1652	struct sdw_bus *bus = &cdns->bus;
1653	u32 clock_stop_quirks;
1654	int ret;
1655
1656	if (bus->prop.hw_disabled) {
1657		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1658			bus->link_id);
1659		return 0;
1660	}
1661
1662	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1663
1664	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1665
1666		ret = sdw_cdns_enable_interrupt(cdns, false);
1667		if (ret < 0) {
1668			dev_err(dev, "cannot disable interrupts on suspend\n");
1669			return ret;
1670		}
1671
1672		ret = intel_link_power_down(sdw);
1673		if (ret) {
1674			dev_err(dev, "Link power down failed: %d\n", ret);
1675			return ret;
1676		}
1677
1678		intel_shim_wake(sdw, false);
1679
1680	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
1681		   !clock_stop_quirks) {
1682		bool wake_enable = true;
1683
1684		ret = sdw_cdns_clock_stop(cdns, true);
1685		if (ret < 0) {
1686			dev_err(dev, "cannot enable clock stop on suspend\n");
1687			wake_enable = false;
1688		}
1689
1690		ret = sdw_cdns_enable_interrupt(cdns, false);
1691		if (ret < 0) {
1692			dev_err(dev, "cannot disable interrupts on suspend\n");
1693			return ret;
1694		}
1695
1696		ret = intel_link_power_down(sdw);
1697		if (ret) {
1698			dev_err(dev, "Link power down failed: %d\n", ret);
1699			return ret;
1700		}
1701
1702		intel_shim_wake(sdw, wake_enable);
1703	} else {
1704		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1705			__func__, clock_stop_quirks);
1706		ret = -EINVAL;
1707	}
1708
1709	return ret;
1710}
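
/*
 * Runtime-suspend strategy used above, depending on the clock-stop quirks
 * set by the parent: TEARDOWN powers the link down completely with wake
 * detection disabled, while BUS_RESET (or no quirk at all) first places
 * the bus in clock stop and leaves SHIM wake detection enabled so a
 * peripheral can wake the link; any other combination is rejected as
 * unsupported.
 */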
1711
1712static int __maybe_unused intel_resume(struct device *dev)
1713{
1714	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1715	struct sdw_intel *sdw = cdns_to_intel(cdns);
1716	struct sdw_bus *bus = &cdns->bus;
1717	int link_flags;
1718	bool multi_link;
1719	int ret;
1720
1721	if (bus->prop.hw_disabled) {
1722		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1723			bus->link_id);
1724		return 0;
1725	}
1726
1727	link_flags = md_flags >> (bus->link_id * 8);
1728	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1729
1730	if (pm_runtime_suspended(dev)) {
1731		dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);
1732
1733		/* follow required sequence from runtime_pm.rst */
1734		pm_runtime_disable(dev);
1735		pm_runtime_set_active(dev);
1736		pm_runtime_mark_last_busy(dev);
1737		pm_runtime_enable(dev);
1738
1739		link_flags = md_flags >> (bus->link_id * 8);
1740
1741		if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1742			pm_runtime_idle(dev);
1743	}
1744
1745	ret = intel_init(sdw);
1746	if (ret) {
1747		dev_err(dev, "%s failed: %d\n", __func__, ret);
1748		return ret;
1749	}
1750
1751	/*
1752	 * make sure all Slaves are tagged as UNATTACHED and provide
1753	 * reason for reinitialization
1754	 */
1755	sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1756
1757	ret = sdw_cdns_enable_interrupt(cdns, true);
1758	if (ret < 0) {
1759		dev_err(dev, "cannot enable interrupts during resume\n");
1760		return ret;
1761	}
1762
1763	/*
1764	 * follow recommended programming flows to avoid timeouts when
1765	 * gsync is enabled
1766	 */
1767	if (multi_link)
1768		intel_shim_sync_arm(sdw);
1769
1770	ret = sdw_cdns_init(&sdw->cdns);
1771	if (ret < 0) {
1772		dev_err(dev, "unable to initialize Cadence IP during resume\n");
1773		return ret;
1774	}
1775
1776	ret = sdw_cdns_exit_reset(cdns);
1777	if (ret < 0) {
1778		dev_err(dev, "unable to exit bus reset sequence during resume\n");
1779		return ret;
1780	}
1781
1782	if (multi_link) {
1783		ret = intel_shim_sync_go(sdw);
1784		if (ret < 0) {
1785			dev_err(dev, "sync go failed during resume\n");
1786			return ret;
1787		}
1788	}
1789
1790	/*
1791	 * after system resume, the pm_runtime suspend() may kick in
1792	 * during the enumeration, before any child device forces the
1793	 * master device to remain active.  Using pm_runtime_get()
1794	 * routines is not really possible, since it'd prevent the
1795	 * master from suspending.
1796	 * A reasonable compromise is to update the pm_runtime
1797	 * counters and delay the pm_runtime suspend by several
1798	 * seconds, by when all enumeration should be complete.
1799	 */
1800	pm_runtime_mark_last_busy(dev);
1801
1802	return ret;
1803}
1804
1805static int __maybe_unused intel_resume_runtime(struct device *dev)
1806{
1807	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1808	struct sdw_intel *sdw = cdns_to_intel(cdns);
1809	struct sdw_bus *bus = &cdns->bus;
1810	u32 clock_stop_quirks;
1811	bool clock_stop0;
1812	int link_flags;
1813	bool multi_link;
1814	int status;
1815	int ret;
1816
1817	if (bus->prop.hw_disabled) {
1818		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1819			bus->link_id);
1820		return 0;
1821	}
1822
1823	link_flags = md_flags >> (bus->link_id * 8);
1824	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1825
1826	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1827
1828	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1829		ret = intel_init(sdw);
1830		if (ret) {
1831			dev_err(dev, "%s failed: %d\n", __func__, ret);
1832			return ret;
1833		}
1834
1835		/*
1836		 * make sure all Slaves are tagged as UNATTACHED and provide
1837		 * reason for reinitialization
1838		 */
1839		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1840
1841		ret = sdw_cdns_enable_interrupt(cdns, true);
1842		if (ret < 0) {
1843			dev_err(dev, "cannot enable interrupts during resume\n");
1844			return ret;
1845		}
1846
1847		/*
1848		 * follow recommended programming flows to avoid
1849		 * timeouts when gsync is enabled
1850		 */
1851		if (multi_link)
1852			intel_shim_sync_arm(sdw);
1853
1854		ret = sdw_cdns_init(&sdw->cdns);
1855		if (ret < 0) {
1856			dev_err(dev, "unable to initialize Cadence IP during resume\n");
1857			return ret;
1858		}
1859
1860		ret = sdw_cdns_exit_reset(cdns);
1861		if (ret < 0) {
1862			dev_err(dev, "unable to exit bus reset sequence during resume\n");
1863			return ret;
1864		}
1865
1866		if (multi_link) {
1867			ret = intel_shim_sync_go(sdw);
1868			if (ret < 0) {
1869				dev_err(dev, "sync go failed during resume\n");
1870				return ret;
1871			}
1872		}
1873	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
1874		ret = intel_init(sdw);
1875		if (ret) {
1876			dev_err(dev, "%s failed: %d\n", __func__, ret);
1877			return ret;
1878		}
1879
1880		/*
1881		 * An exception condition occurs for the CLK_STOP_BUS_RESET
1882		 * case if one or more masters remain active. In this condition,
1883	 * all the masters are powered on because they are in the same power
1884	 * domain. The Master can preserve its context in clock stop mode 0, so
1885		 * there is no need to clear slave status and reset bus.
1886		 */
1887		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1888
1889		if (!clock_stop0) {
1890
1891			/*
1892			 * make sure all Slaves are tagged as UNATTACHED and
1893			 * provide reason for reinitialization
1894			 */
1895
1896			status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1897			sdw_clear_slave_status(bus, status);
1898
1899			ret = sdw_cdns_enable_interrupt(cdns, true);
1900			if (ret < 0) {
1901				dev_err(dev, "cannot enable interrupts during resume\n");
1902				return ret;
1903			}
1904
1905			/*
1906			 * follow recommended programming flows to avoid
1907			 * timeouts when gsync is enabled
1908			 */
1909			if (multi_link)
1910				intel_shim_sync_arm(sdw);
1911
1912			/*
1913			 * Re-initialize the IP since it was powered-off
1914			 */
1915			sdw_cdns_init(&sdw->cdns);
1916
1917		} else {
1918			ret = sdw_cdns_enable_interrupt(cdns, true);
1919			if (ret < 0) {
1920				dev_err(dev, "cannot enable interrupts during resume\n");
1921				return ret;
1922			}
1923		}
1924
1925		ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1926		if (ret < 0) {
1927			dev_err(dev, "unable to restart clock during resume\n");
1928			return ret;
1929		}
1930
1931		if (!clock_stop0) {
1932			ret = sdw_cdns_exit_reset(cdns);
1933			if (ret < 0) {
1934				dev_err(dev, "unable to exit bus reset sequence during resume\n");
1935				return ret;
1936			}
1937
1938			if (multi_link) {
1939				ret = intel_shim_sync_go(sdw);
1940				if (ret < 0) {
1941					dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1942					return ret;
1943				}
1944			}
1945		}
1946	} else if (!clock_stop_quirks) {
1947
1948		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1949		if (!clock_stop0)
1950			dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1951
1952		ret = intel_init(sdw);
1953		if (ret) {
1954			dev_err(dev, "%s failed: %d\n", __func__, ret);
1955			return ret;
1956		}
1957
1958		ret = sdw_cdns_enable_interrupt(cdns, true);
1959		if (ret < 0) {
1960			dev_err(dev, "cannot enable interrupts during resume\n");
1961			return ret;
1962		}
1963
1964		ret = sdw_cdns_clock_restart(cdns, false);
1965		if (ret < 0) {
1966			dev_err(dev, "unable to resume master during resume\n");
1967			return ret;
1968		}
1969	} else {
1970		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1971			__func__, clock_stop_quirks);
1972		ret = -EINVAL;
1973	}
1974
1975	return ret;
1976}
1977
1978static const struct dev_pm_ops intel_pm = {
1979	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
1980	SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
1981};
1982
1983static const struct auxiliary_device_id intel_link_id_table[] = {
1984	{ .name = "soundwire_intel.link" },
1985	{},
1986};
1987MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table);
1988
1989static struct auxiliary_driver sdw_intel_drv = {
1990	.probe = intel_link_probe,
1991	.remove = intel_link_remove,
1992	.driver = {
1993		/* auxiliary_driver_register() sets .name to be the modname */
1994		.pm = &intel_pm,
1995	},
1996	.id_table = intel_link_id_table
1997};
1998module_auxiliary_driver(sdw_intel_drv);
1999
2000MODULE_LICENSE("Dual BSD/GPL");
2001MODULE_DESCRIPTION("Intel Soundwire Link Driver");