   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2// Copyright(c) 2015-17 Intel Corporation.
   3
   4/*
   5 * Soundwire Intel Master Driver
   6 */
   7
   8#include <linux/acpi.h>
   9#include <linux/cleanup.h>
  10#include <linux/debugfs.h>
  11#include <linux/delay.h>
  12#include <linux/io.h>
  13#include <sound/pcm_params.h>
  14#include <linux/pm_runtime.h>
  15#include <sound/soc.h>
  16#include <linux/soundwire/sdw_registers.h>
  17#include <linux/soundwire/sdw.h>
  18#include <linux/soundwire/sdw_intel.h>
  19#include "cadence_master.h"
  20#include "bus.h"
  21#include "intel.h"
  22
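/*
 * Register polling helpers: intel_wait_bit() polls a 32-bit register until
 * (value & mask) equals the target pattern, giving up with -EAGAIN after 10
 * iterations of usleep_range(50, 100), i.e. roughly 0.5 to 1 ms in total.
 * intel_set_bit() and intel_clear_bit() write a value first and then wait
 * for the masked bits to assert or de-assert, respectively.
 */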
  23static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
  24{
  25	int timeout = 10;
  26	u32 reg_read;
  27
  28	do {
  29		reg_read = readl(base + offset);
  30		if ((reg_read & mask) == target)
  31			return 0;
  32
  33		timeout--;
  34		usleep_range(50, 100);
  35	} while (timeout != 0);
  36
  37	return -EAGAIN;
  38}
  39
  40static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
  41{
  42	writel(value, base + offset);
  43	return intel_wait_bit(base, offset, mask, 0);
  44}
  45
  46static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
  47{
  48	writel(value, base + offset);
  49	return intel_wait_bit(base, offset, mask, mask);
  50}
  51
  52/*
  53 * debugfs
  54 */
  55#ifdef CONFIG_DEBUG_FS
  56
  57#define RD_BUF (2 * PAGE_SIZE)
  58
  59static ssize_t intel_sprintf(void __iomem *mem, bool l,
  60			     char *buf, size_t pos, unsigned int reg)
  61{
  62	int value;
  63
  64	if (l)
  65		value = intel_readl(mem, reg);
  66	else
  67		value = intel_readw(mem, reg);
  68
  69	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
  70}
  71
  72static int intel_reg_show(struct seq_file *s_file, void *data)
  73{
  74	struct sdw_intel *sdw = s_file->private;
  75	void __iomem *s = sdw->link_res->shim;
  76	void __iomem *a = sdw->link_res->alh;
  77	ssize_t ret;
  78	int i, j;
  79	unsigned int links, reg;
  80
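	/*
	 * __free(kfree), from linux/cleanup.h included above, attaches a
	 * scope-based cleanup to 'buf': kfree() runs automatically on every
	 * return path, so no explicit kfree() is needed in this function.
	 */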
  81	char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
  82	if (!buf)
  83		return -ENOMEM;
  84
  85	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
  86
  87	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
  88	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
  89
  90	for (i = 0; i < links; i++) {
  91		reg = SDW_SHIM_LCAP + i * 4;
  92		ret += intel_sprintf(s, true, buf, ret, reg);
  93	}
  94
  95	for (i = 0; i < links; i++) {
  96		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
  97		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
  98		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
  99		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
 100		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
 101		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
 102		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
 103
 104		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
 105
 106		/*
 107		 * the value 10 is the number of PDIs. We will need a
 108		 * cleanup to remove hard-coded Intel configurations
 109		 * from cadence_master.c
 110		 */
 111		for (j = 0; j < 10; j++) {
 112			ret += intel_sprintf(s, false, buf, ret,
 113					SDW_SHIM_PCMSYCHM(i, j));
 114			ret += intel_sprintf(s, false, buf, ret,
 115					SDW_SHIM_PCMSYCHC(i, j));
 116		}
 117		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
 118
 119		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
 120		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
 121	}
 122
 123	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
 124	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
 125	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
 126
 127	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
 128	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
 129		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
 130
 131	seq_printf(s_file, "%s", buf);
 132
 133	return 0;
 134}
 135DEFINE_SHOW_ATTRIBUTE(intel_reg);
 136
 137static int intel_set_m_datamode(void *data, u64 value)
 138{
 139	struct sdw_intel *sdw = data;
 140	struct sdw_bus *bus = &sdw->cdns.bus;
 141
 142	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 143		return -EINVAL;
 144
 145	/* Userspace changed the hardware state behind the kernel's back */
 146	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 147
 148	bus->params.m_data_mode = value;
 149
 150	return 0;
 151}
 152DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
 153			 intel_set_m_datamode, "%llu\n");
 154
 155static int intel_set_s_datamode(void *data, u64 value)
 156{
 157	struct sdw_intel *sdw = data;
 158	struct sdw_bus *bus = &sdw->cdns.bus;
 159
 160	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 161		return -EINVAL;
 162
 163	/* Userspace changed the hardware state behind the kernel's back */
 164	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 165
 166	bus->params.s_data_mode = value;
 167
 168	return 0;
 169}
 170DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
 171			 intel_set_s_datamode, "%llu\n");
 172
 173static void intel_debugfs_init(struct sdw_intel *sdw)
 174{
 175	struct dentry *root = sdw->cdns.bus.debugfs;
 176
 177	if (!root)
 178		return;
 179
 180	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
 181
 182	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
 183			    &intel_reg_fops);
 184
 185	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
 186			    &intel_set_m_datamode_fops);
 187
 188	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
 189			    &intel_set_s_datamode_fops);
 190
 191	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
 192}
 193
 194static void intel_debugfs_exit(struct sdw_intel *sdw)
 195{
 196	debugfs_remove_recursive(sdw->debugfs);
 197}
 198#else
 199static void intel_debugfs_init(struct sdw_intel *sdw) {}
 200static void intel_debugfs_exit(struct sdw_intel *sdw) {}
 201#endif /* CONFIG_DEBUG_FS */
 202
 203/*
 204 * shim ops
 205 */
 206/* this needs to be called with shim_lock */
 207static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
 208{
 209	void __iomem *shim = sdw->link_res->shim;
 210	unsigned int link_id = sdw->instance;
 211	u16 ioctl;
 212
 213	/* Switch to MIP from Glue logic */
 214	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
 215
 216	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
 217	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 218	usleep_range(10, 15);
 219
 220	ioctl &= ~(SDW_SHIM_IOCTL_DO);
 221	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 222	usleep_range(10, 15);
 223
 224	ioctl |= (SDW_SHIM_IOCTL_MIF);
 225	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 226	usleep_range(10, 15);
 227
 228	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
 229	ioctl &= ~(SDW_SHIM_IOCTL_COE);
 230	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 231	usleep_range(10, 15);
 232
 233	/* at this point Master IP has full control of the I/Os */
 234}
 235
 236/* this needs to be called with shim_lock */
 237static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
 238{
 239	unsigned int link_id = sdw->instance;
 240	void __iomem *shim = sdw->link_res->shim;
 241	u16 ioctl;
 242
 243	/* Glue logic */
 244	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
 245	ioctl |= SDW_SHIM_IOCTL_BKE;
 246	ioctl |= SDW_SHIM_IOCTL_COE;
 247	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 248	usleep_range(10, 15);
 249
 250	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
 251	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 252	usleep_range(10, 15);
 253
 254	/* at this point Integration Glue has full control of the I/Os */
 255}
 256
 257/* this needs to be called with shim_lock */
 258static void intel_shim_init(struct sdw_intel *sdw)
 259{
 260	void __iomem *shim = sdw->link_res->shim;
 261	unsigned int link_id = sdw->instance;
 262	u16 ioctl = 0, act;
 263
 264	/* Initialize Shim */
 265	ioctl |= SDW_SHIM_IOCTL_BKE;
 266	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 267	usleep_range(10, 15);
 268
 269	ioctl |= SDW_SHIM_IOCTL_WPDD;
 270	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 271	usleep_range(10, 15);
 272
 273	ioctl |= SDW_SHIM_IOCTL_DO;
 274	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 275	usleep_range(10, 15);
 276
 277	ioctl |= SDW_SHIM_IOCTL_DOE;
 278	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 279	usleep_range(10, 15);
 280
 281	intel_shim_glue_to_master_ip(sdw);
 282
 283	act = intel_readw(shim, SDW_SHIM_CTMCTL(link_id));
 284	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
 285	act |= SDW_SHIM_CTMCTL_DACTQE;
 286	act |= SDW_SHIM_CTMCTL_DODS;
 287	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
 288	usleep_range(10, 15);
 289}
 290
 291static int intel_shim_check_wake(struct sdw_intel *sdw)
 292{
 293	void __iomem *shim;
 294	u16 wake_sts;
 295
 296	shim = sdw->link_res->shim;
 297	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 298
 299	return wake_sts & BIT(sdw->instance);
 300}
 301
 302static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
 303{
 304	void __iomem *shim = sdw->link_res->shim;
 305	unsigned int link_id = sdw->instance;
 306	u16 wake_en, wake_sts;
 307
 308	mutex_lock(sdw->link_res->shim_lock);
 309	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
 310
 311	if (wake_enable) {
 312		/* Enable the wakeup */
 313		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 314		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 315	} else {
 316		/* Disable the wake up interrupt */
 317		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
 318		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 319
 320		/* Clear wake status */
 321		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 322		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
 323		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
 324	}
 325	mutex_unlock(sdw->link_res->shim_lock);
 326}
 327
 328static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
 329{
 330	void __iomem *shim = sdw->link_res->shim;
 331	int sync_reg;
 332
 333	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 334	return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
 335}
 336
 337static int intel_link_power_up(struct sdw_intel *sdw)
 338{
 339	unsigned int link_id = sdw->instance;
 340	void __iomem *shim = sdw->link_res->shim;
 341	u32 *shim_mask = sdw->link_res->shim_mask;
 342	struct sdw_bus *bus = &sdw->cdns.bus;
 343	struct sdw_master_prop *prop = &bus->prop;
 344	u32 spa_mask, cpa_mask;
 345	u32 link_control;
 346	int ret = 0;
 347	u32 clock_source;
 348	u32 syncprd;
 349	u32 sync_reg;
 350	bool lcap_mlcs;
 351
 352	mutex_lock(sdw->link_res->shim_lock);
 353
 354	/*
 355	 * The hardware relies on an internal counter, typically 4kHz,
 356	 * to generate the SoundWire SSP - which defines a 'safe'
 357	 * synchronization point between commands and audio transport
 358	 * and allows for multi link synchronization. The SYNCPRD value
 359	 * is only dependent on the oscillator clock provided to
 360	 * the IP, so adjust based on _DSD properties reported in DSDT
 361	 * tables. The values reported are based on either 24MHz
 362	 * (CNL/CML) or 38.4 MHz (ICL/TGL+). On MeteorLake additional
 363	 * frequencies are available with the MLCS clock source selection.
 364	 */
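	/*
	 * As a worked example (assuming the 4 kHz counter mentioned above,
	 * i.e. SDW_CADENCE_GSYNC_KHZ = 4): SYNCPRD_VAL_24 works out to
	 * 24000 / 4 - 1 = 5999 and SYNCPRD_VAL_38_4 to 38400 / 4 - 1 = 9599.
	 * The selection below maps mclk frequencies that are a multiple of
	 * 6 MHz to the 24 MHz XTAL value (or the 96 MHz audio PLL when MLCS
	 * is available), multiples of 2.4 MHz but not of 6 MHz to the
	 * 38.4 MHz XTAL value, and anything else to the 24.576 MHz cardinal
	 * clock, which is only valid when MLCS is supported.
	 */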
 365	lcap_mlcs = intel_readl(shim, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_MLCS_MASK;
 366
 367	if (prop->mclk_freq % 6000000) {
 368		if (prop->mclk_freq % 2400000) {
 369			if (lcap_mlcs) {
 370				syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
 371				clock_source = SDW_SHIM_MLCS_CARDINAL_CLK;
 372			} else {
 373				dev_err(sdw->cdns.dev, "%s: invalid clock configuration, mclk %d lcap_mlcs %d\n",
 374					__func__, prop->mclk_freq, lcap_mlcs);
 375				ret = -EINVAL;
 376				goto out;
 377			}
 378		} else {
 379			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
 380			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
 381		}
 382	} else {
 383		if (lcap_mlcs) {
 384			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
 385			clock_source = SDW_SHIM_MLCS_AUDIO_PLL_CLK;
 386		} else {
 387			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
 388			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
 389		}
 390	}
 391
 392	if (!*shim_mask) {
 393		dev_dbg(sdw->cdns.dev, "powering up all links\n");
 394
 395		/* we first need to program the SyncPRD/CPU registers */
 396		dev_dbg(sdw->cdns.dev,
 397			"first link up, programming SYNCPRD\n");
 398
 399		/* set SyncPRD period */
 400		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 401		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
 402
 403		/* Set SyncCPU bit */
 404		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
 405		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 406
 407		/* Link power up sequence */
 408		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 409
 410		/* only power-up enabled links */
 411		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
 412		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 413
 414		link_control |=  spa_mask;
 415
 416		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 417		if (ret < 0) {
 418			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
 419			goto out;
 420		}
 421
 422		/* SyncCPU will change once link is active */
 423		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
 424				     SDW_SHIM_SYNC_SYNCCPU, 0);
 425		if (ret < 0) {
 426			dev_err(sdw->cdns.dev,
 427				"Failed to set SHIM_SYNC: %d\n", ret);
 428			goto out;
 429		}
 430
 431		/* update link clock if needed */
 432		if (lcap_mlcs) {
 433			link_control = intel_readl(shim, SDW_SHIM_LCTL);
 434			u32p_replace_bits(&link_control, clock_source, SDW_SHIM_LCTL_MLCS_MASK);
 435			intel_writel(shim, SDW_SHIM_LCTL, link_control);
 436		}
 437	}
 438
 439	*shim_mask |= BIT(link_id);
 440
 441	sdw->cdns.link_up = true;
 442
 443	intel_shim_init(sdw);
 444
 445out:
 446	mutex_unlock(sdw->link_res->shim_lock);
 447
 448	return ret;
 449}
 450
 451static int intel_link_power_down(struct sdw_intel *sdw)
 452{
 453	u32 link_control, spa_mask, cpa_mask;
 454	unsigned int link_id = sdw->instance;
 455	void __iomem *shim = sdw->link_res->shim;
 456	u32 *shim_mask = sdw->link_res->shim_mask;
 457	int ret = 0;
 458
 459	mutex_lock(sdw->link_res->shim_lock);
 460
 461	if (!(*shim_mask & BIT(link_id)))
 462		dev_err(sdw->cdns.dev,
 463			"%s: Unbalanced power-up/down calls\n", __func__);
 464
 465	sdw->cdns.link_up = false;
 466
 467	intel_shim_master_ip_to_glue(sdw);
 468
 469	*shim_mask &= ~BIT(link_id);
 470
 471	if (!*shim_mask) {
 472
 473		dev_dbg(sdw->cdns.dev, "powering down all links\n");
 474
 475		/* Link power down sequence */
 476		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 477
 478		/* only power-down enabled links */
 479		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
 480		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 481
 482		link_control &=  spa_mask;
 483
 484		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 485		if (ret < 0) {
 486			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
 487
 488			/*
 489			 * we leave the sdw->cdns.link_up flag as false since we've disabled
 490			 * the link at this point and cannot handle interrupts any longer.
 491			 */
 492		}
 493	}
 494
 495	mutex_unlock(sdw->link_res->shim_lock);
 496
 497	return ret;
 498}
 499
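/*
 * Multi-link bank-switch handshake: intel_shim_sync_arm() sets the per-link
 * CMDSYNC bit for each link taking part, and intel_shim_sync_go() then writes
 * SYNCGO, which triggers the bank switch on all armed links and clears their
 * CMDSYNC bits in one shot; intel_check_cmdsync_unlocked() above simply
 * reports whether any CMDSYNC bit is still set.
 */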
 500static void intel_shim_sync_arm(struct sdw_intel *sdw)
 501{
 502	void __iomem *shim = sdw->link_res->shim;
 503	u32 sync_reg;
 504
 505	mutex_lock(sdw->link_res->shim_lock);
 506
 507	/* update SYNC register */
 508	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 509	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
 510	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 511
 512	mutex_unlock(sdw->link_res->shim_lock);
 513}
 514
 515static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
 516{
 517	void __iomem *shim = sdw->link_res->shim;
 518	u32 sync_reg;
 519
 520	/* Read SYNC register */
 521	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 522
 523	/*
 524	 * Set SyncGO bit to synchronously trigger a bank switch for
 525	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
 526	 * the Masters.
 527	 */
 528	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
 529
 530	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 531
 532	return 0;
 533}
 534
 535static int intel_shim_sync_go(struct sdw_intel *sdw)
 536{
 537	int ret;
 538
 539	mutex_lock(sdw->link_res->shim_lock);
 540
 541	ret = intel_shim_sync_go_unlocked(sdw);
 542
 543	mutex_unlock(sdw->link_res->shim_lock);
 544
 545	return ret;
 546}
 547
 548/*
 549 * PDI routines
 550 */
 551static void intel_pdi_init(struct sdw_intel *sdw,
 552			   struct sdw_cdns_stream_config *config)
 553{
 554	void __iomem *shim = sdw->link_res->shim;
 555	unsigned int link_id = sdw->instance;
 556	int pcm_cap;
 557
 558	/* PCM Stream Capability */
 559	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
 560
 561	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
 562	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
 563	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
 564
 565	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
 566		config->pcm_bd, config->pcm_in, config->pcm_out);
 567}
 568
 569static int
 570intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
 571{
 572	void __iomem *shim = sdw->link_res->shim;
 573	unsigned int link_id = sdw->instance;
 574	int count;
 575
 576	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
 577
 578	/*
 579	 * WORKAROUND: on all existing Intel controllers, pdi
 580	 * number 2 reports channel count as 1 even though it
  581		 * supports 8 channels. Hard-code the channel count for
  582		 * PDI number 2.
 583	 */
 584	if (pdi_num == 2)
 585		count = 7;
 586
 587	/* zero based values for channel count in register */
 588	count++;
 589
 590	return count;
 591}
 592
 593static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
 594				   struct sdw_cdns_pdi *pdi,
 595				   unsigned int num_pdi,
 596				   unsigned int *num_ch)
 597{
 598	int i, ch_count = 0;
 599
 600	for (i = 0; i < num_pdi; i++) {
 601		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
 602		ch_count += pdi->ch_count;
 603		pdi++;
 604	}
 605
 606	*num_ch = ch_count;
 607	return 0;
 608}
 609
 610static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
 611				      struct sdw_cdns_streams *stream)
 612{
 613	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
 614				&stream->num_ch_bd);
 615
 616	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
 617				&stream->num_ch_in);
 618
 619	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
 620				&stream->num_ch_out);
 621
 622	return 0;
 623}
 624
 625static void
 626intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 627{
 628	void __iomem *shim = sdw->link_res->shim;
 629	unsigned int link_id = sdw->instance;
 630	int pdi_conf = 0;
 631
 632	/* the Bulk and PCM streams are not contiguous */
 633	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 634	if (pdi->num >= 2)
 635		pdi->intel_alh_id += 2;
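	/*
	 * For example, on link 0 this maps PDI 0 -> ALH stream 3, PDI 1 -> 4
	 * and PDI 2 -> 7; link 1 starts at ALH stream 19. The extra +2 for
	 * PDIs >= 2 accounts for the Bulk slots mentioned above not being
	 * contiguous with the PCM ones.
	 */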
 636
 637	/*
 638	 * Program stream parameters to stream SHIM register
 639	 * This is applicable for PCM stream only.
 640	 */
 641	if (pdi->type != SDW_STREAM_PCM)
 642		return;
 643
 644	if (pdi->dir == SDW_DATA_DIR_RX)
 645		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
 646	else
 647		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
 648
 649	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
 650	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
 651	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
 652
 653	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
 654}
 655
 656static void
 657intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 658{
 659	void __iomem *alh = sdw->link_res->alh;
 660	unsigned int link_id = sdw->instance;
 661	unsigned int conf;
 662
 663	/* the Bulk and PCM streams are not contiguous */
 664	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 665	if (pdi->num >= 2)
 666		pdi->intel_alh_id += 2;
 667
 668	/* Program Stream config ALH register */
 669	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
 670
 671	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
 672	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
 673
 674	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
 675}
 676
 677static int intel_params_stream(struct sdw_intel *sdw,
 678			       struct snd_pcm_substream *substream,
 679			       struct snd_soc_dai *dai,
 680			       struct snd_pcm_hw_params *hw_params,
 681			       int link_id, int alh_stream_id)
 682{
 683	struct sdw_intel_link_res *res = sdw->link_res;
 684	struct sdw_intel_stream_params_data params_data;
 685
 686	params_data.substream = substream;
 687	params_data.dai = dai;
 688	params_data.hw_params = hw_params;
 689	params_data.link_id = link_id;
 690	params_data.alh_stream_id = alh_stream_id;
 691
 692	if (res->ops && res->ops->params_stream && res->dev)
 693		return res->ops->params_stream(res->dev,
 694					       &params_data);
 695	return -EIO;
 696}
 697
 698/*
 699 * DAI routines
 700 */
 701
 702static int intel_free_stream(struct sdw_intel *sdw,
 703			     struct snd_pcm_substream *substream,
 704			     struct snd_soc_dai *dai,
 705			     int link_id)
 706{
 707	struct sdw_intel_link_res *res = sdw->link_res;
 708	struct sdw_intel_stream_free_data free_data;
 709
 710	free_data.substream = substream;
 711	free_data.dai = dai;
 712	free_data.link_id = link_id;
 713
 714	if (res->ops && res->ops->free_stream && res->dev)
 715		return res->ops->free_stream(res->dev, &free_data);
 716
 717	return 0;
 718}
 719
 720static int intel_hw_params(struct snd_pcm_substream *substream,
 721			   struct snd_pcm_hw_params *params,
 722			   struct snd_soc_dai *dai)
 723{
 724	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 725	struct sdw_intel *sdw = cdns_to_intel(cdns);
 726	struct sdw_cdns_dai_runtime *dai_runtime;
 727	struct sdw_cdns_pdi *pdi;
 728	struct sdw_stream_config sconfig;
 729	int ch, dir;
 730	int ret;
 731
 732	dai_runtime = cdns->dai_runtime_array[dai->id];
 733	if (!dai_runtime)
 734		return -EIO;
 735
 736	ch = params_channels(params);
 737	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 738		dir = SDW_DATA_DIR_RX;
 739	else
 740		dir = SDW_DATA_DIR_TX;
 741
 742	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
 743
 744	if (!pdi)
 745		return -EINVAL;
 746
 747	/* do run-time configurations for SHIM, ALH and PDI/PORT */
 748	intel_pdi_shim_configure(sdw, pdi);
 749	intel_pdi_alh_configure(sdw, pdi);
 750	sdw_cdns_config_stream(cdns, ch, dir, pdi);
 751
 752	/* store pdi and hw_params, may be needed in prepare step */
 753	dai_runtime->paused = false;
 754	dai_runtime->suspended = false;
 755	dai_runtime->pdi = pdi;
 756
 757	/* Inform DSP about PDI stream number */
 758	ret = intel_params_stream(sdw, substream, dai, params,
 759				  sdw->instance,
 760				  pdi->intel_alh_id);
 761	if (ret)
 762		return ret;
 763
 764	sconfig.direction = dir;
 765	sconfig.ch_count = ch;
 766	sconfig.frame_rate = params_rate(params);
 767	sconfig.type = dai_runtime->stream_type;
 768
 769	sconfig.bps = snd_pcm_format_width(params_format(params));
 770
 771	/* Port configuration */
 772	struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
 773								GFP_KERNEL);
 774	if (!pconfig)
 775		return -ENOMEM;
 776
 777	pconfig->num = pdi->num;
 778	pconfig->ch_mask = (1 << ch) - 1;
 779
 780	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
 781				    pconfig, 1, dai_runtime->stream);
 782	if (ret)
 783		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
 784
 785	return ret;
 786}
 787
 788static int intel_prepare(struct snd_pcm_substream *substream,
 789			 struct snd_soc_dai *dai)
 790{
 791	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 792	struct sdw_intel *sdw = cdns_to_intel(cdns);
 793	struct sdw_cdns_dai_runtime *dai_runtime;
 794	int ch, dir;
 795	int ret = 0;
 796
 797	dai_runtime = cdns->dai_runtime_array[dai->id];
 798	if (!dai_runtime) {
 799		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 800			__func__);
 801		return -EIO;
 802	}
 803
 804	if (dai_runtime->suspended) {
 805		struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
 806		struct snd_pcm_hw_params *hw_params;
 807
 808		hw_params = &rtd->dpcm[substream->stream].hw_params;
 809
 810		dai_runtime->suspended = false;
 811
 812		/*
 813		 * .prepare() is called after system resume, where we
 814		 * need to reinitialize the SHIM/ALH/Cadence IP.
 815		 * .prepare() is also called to deal with underflows,
 816		 * but in those cases we cannot touch ALH/SHIM
 817		 * registers
 818		 */
 819
 820		/* configure stream */
 821		ch = params_channels(hw_params);
 822		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 823			dir = SDW_DATA_DIR_RX;
 824		else
 825			dir = SDW_DATA_DIR_TX;
 826
 827		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
 828		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
 829		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
 830
 831		/* Inform DSP about PDI stream number */
 832		ret = intel_params_stream(sdw, substream, dai,
 833					  hw_params,
 834					  sdw->instance,
 835					  dai_runtime->pdi->intel_alh_id);
 836	}
 837
 838	return ret;
 839}
 840
 841static int
 842intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
 843{
 844	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 845	struct sdw_intel *sdw = cdns_to_intel(cdns);
 846	struct sdw_cdns_dai_runtime *dai_runtime;
 847	int ret;
 848
 849	dai_runtime = cdns->dai_runtime_array[dai->id];
 850	if (!dai_runtime)
 851		return -EIO;
 852
 853	/*
 854	 * The sdw stream state will transition to RELEASED when stream->
 855	 * master_list is empty. So the stream state will transition to
 856	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
 857	 * cpu-dai.
 858	 */
 859	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
 860	if (ret < 0) {
 861		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
 862			dai_runtime->stream->name, ret);
 863		return ret;
 864	}
 865
 866	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
 867	if (ret < 0) {
 868		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
 869		return ret;
 870	}
 871
 872	dai_runtime->pdi = NULL;
 873
 874	return 0;
 875}
 876
 877static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
 878				    void *stream, int direction)
 879{
 880	return cdns_set_sdw_stream(dai, stream, direction);
 881}
 882
 883static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
 884				  int direction)
 885{
 886	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 887	struct sdw_cdns_dai_runtime *dai_runtime;
 888
 889	dai_runtime = cdns->dai_runtime_array[dai->id];
 890	if (!dai_runtime)
 891		return ERR_PTR(-EINVAL);
 892
 893	return dai_runtime->stream;
 894}
 895
 896static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
 897{
 898	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 899	struct sdw_cdns_dai_runtime *dai_runtime;
 900	int ret = 0;
 901
 902	dai_runtime = cdns->dai_runtime_array[dai->id];
 903	if (!dai_runtime) {
 904		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 905			__func__);
 906		return -EIO;
 907	}
 908
 909	switch (cmd) {
 910	case SNDRV_PCM_TRIGGER_SUSPEND:
 911
 912		/*
 913		 * The .prepare callback is used to deal with xruns and resume operations.
 914		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
 915		 * but for resume operations the DMAs and SHIM registers need to be initialized.
  916		 * The .trigger callback is used to track the suspend case only.
 917		 */
 918
 919		dai_runtime->suspended = true;
 920
 921		break;
 922
 923	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 924		dai_runtime->paused = true;
 925		break;
 926	case SNDRV_PCM_TRIGGER_STOP:
 927	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 928		dai_runtime->paused = false;
 929		break;
 930	default:
 931		break;
 932	}
 933
 934	return ret;
 935}
 936
 937static int intel_component_probe(struct snd_soc_component *component)
 938{
 939	int ret;
 940
 941	/*
 942	 * make sure the device is pm_runtime_active before initiating
 943	 * bus transactions during the card registration.
  944	 * We use pm_runtime_resume() here rather than taking a
  945	 * reference and releasing it immediately.
 946	 */
 947	ret = pm_runtime_resume(component->dev);
 948	if (ret < 0 && ret != -EACCES)
 949		return ret;
 950
 951	return 0;
 952}
 953
 954static int intel_component_dais_suspend(struct snd_soc_component *component)
 955{
 956	struct snd_soc_dai *dai;
 957
 958	/*
 959	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
 960	 * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
 961	 * Since the component suspend is called last, we can trap this corner case
 962	 * and force the DAIs to release their resources.
 963	 */
 964	for_each_component_dais(component, dai) {
 965		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 966		struct sdw_cdns_dai_runtime *dai_runtime;
 967
 968		dai_runtime = cdns->dai_runtime_array[dai->id];
 969
 970		if (!dai_runtime)
 971			continue;
 972
 973		if (dai_runtime->suspended)
 974			continue;
 975
 976		if (dai_runtime->paused)
 977			dai_runtime->suspended = true;
 978	}
 979
 980	return 0;
 981}
 982
 983static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
 984	.hw_params = intel_hw_params,
 985	.prepare = intel_prepare,
 986	.hw_free = intel_hw_free,
 987	.trigger = intel_trigger,
 988	.set_stream = intel_pcm_set_sdw_stream,
 989	.get_stream = intel_get_sdw_stream,
 990};
 991
 992static const struct snd_soc_component_driver dai_component = {
 993	.name			= "soundwire",
 994	.probe			= intel_component_probe,
 995	.suspend		= intel_component_dais_suspend,
 996	.legacy_dai_naming	= 1,
 997};
 998
 999static int intel_create_dai(struct sdw_cdns *cdns,
1000			    struct snd_soc_dai_driver *dais,
1001			    enum intel_pdi_type type,
1002			    u32 num, u32 off, u32 max_ch)
1003{
1004	int i;
1005
1006	if (num == 0)
1007		return 0;
1008
1009	for (i = off; i < (off + num); i++) {
1010		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1011					      "SDW%d Pin%d",
1012					      cdns->instance, i);
1013		if (!dais[i].name)
1014			return -ENOMEM;
1015
1016		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1017			dais[i].playback.channels_min = 1;
1018			dais[i].playback.channels_max = max_ch;
1019		}
1020
1021		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1022			dais[i].capture.channels_min = 1;
1023			dais[i].capture.channels_max = max_ch;
1024		}
1025
1026		dais[i].ops = &intel_pcm_dai_ops;
1027	}
1028
1029	return 0;
1030}
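/*
 * intel_register_dai() below creates one DAI per PCM PDI, named
 * "SDW<instance> Pin<n>": input PDIs come first, then output PDIs, then
 * bidirectional ones, with 'off' keeping the Pin numbering consecutive
 * across the three groups.
 */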
1031
1032static int intel_register_dai(struct sdw_intel *sdw)
1033{
1034	struct sdw_cdns_dai_runtime **dai_runtime_array;
1035	struct sdw_cdns_stream_config config;
1036	struct sdw_cdns *cdns = &sdw->cdns;
1037	struct sdw_cdns_streams *stream;
1038	struct snd_soc_dai_driver *dais;
1039	int num_dai, ret, off = 0;
1040
1041	/* Read the PDI config and initialize cadence PDI */
1042	intel_pdi_init(sdw, &config);
1043	ret = sdw_cdns_pdi_init(cdns, config);
1044	if (ret)
1045		return ret;
1046
1047	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
1048
1049	/* DAIs are created based on total number of PDIs supported */
1050	num_dai = cdns->pcm.num_pdi;
1051
1052	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1053					 sizeof(struct sdw_cdns_dai_runtime *),
1054					 GFP_KERNEL);
1055	if (!dai_runtime_array)
1056		return -ENOMEM;
1057	cdns->dai_runtime_array = dai_runtime_array;
1058
1059	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1060	if (!dais)
1061		return -ENOMEM;
1062
1063	/* Create PCM DAIs */
1064	stream = &cdns->pcm;
1065
1066	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1067			       off, stream->num_ch_in);
1068	if (ret)
1069		return ret;
1070
1071	off += cdns->pcm.num_in;
1072	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1073			       off, stream->num_ch_out);
1074	if (ret)
1075		return ret;
1076
1077	off += cdns->pcm.num_out;
1078	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1079			       off, stream->num_ch_bd);
1080	if (ret)
1081		return ret;
1082
1083	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1084					       dais, num_dai);
1085}
1086
1087
1088const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1089	.debugfs_init = intel_debugfs_init,
1090	.debugfs_exit = intel_debugfs_exit,
1091
1092	.register_dai = intel_register_dai,
1093
1094	.check_clock_stop = intel_check_clock_stop,
1095	.start_bus = intel_start_bus,
1096	.start_bus_after_reset = intel_start_bus_after_reset,
1097	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1098	.stop_bus = intel_stop_bus,
1099
1100	.link_power_up = intel_link_power_up,
1101	.link_power_down = intel_link_power_down,
1102
1103	.shim_check_wake = intel_shim_check_wake,
1104	.shim_wake = intel_shim_wake,
1105
1106	.pre_bank_switch = intel_pre_bank_switch,
1107	.post_bank_switch = intel_post_bank_switch,
1108
1109	.sync_arm = intel_shim_sync_arm,
1110	.sync_go_unlocked = intel_shim_sync_go_unlocked,
1111	.sync_go = intel_shim_sync_go,
1112	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
1113};
1114EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, "SOUNDWIRE_INTEL");
v5.14.15
   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2// Copyright(c) 2015-17 Intel Corporation.
   3
   4/*
   5 * Soundwire Intel Master Driver
   6 */
   7
   8#include <linux/acpi.h>
 
   9#include <linux/debugfs.h>
  10#include <linux/delay.h>
  11#include <linux/module.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/auxiliary_bus.h>
  15#include <sound/pcm_params.h>
  16#include <linux/pm_runtime.h>
  17#include <sound/soc.h>
  18#include <linux/soundwire/sdw_registers.h>
  19#include <linux/soundwire/sdw.h>
  20#include <linux/soundwire/sdw_intel.h>
  21#include "cadence_master.h"
  22#include "bus.h"
  23#include "intel.h"
  24
  25#define INTEL_MASTER_SUSPEND_DELAY_MS	3000
  26
  27/*
  28 * debug/config flags for the Intel SoundWire Master.
  29 *
  30 * Since we may have multiple masters active, we can have up to 8
  31 * flags reused in each byte, with master0 using the ls-byte, etc.
  32 */
  33
  34#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME		BIT(0)
  35#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP		BIT(1)
  36#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE	BIT(2)
  37#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK		BIT(3)
  38
  39static int md_flags;
  40module_param_named(sdw_md_flags, md_flags, int, 0444);
  41MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
  42
  43/* Intel SHIM Registers Definition */
  44#define SDW_SHIM_LCAP			0x0
  45#define SDW_SHIM_LCTL			0x4
  46#define SDW_SHIM_IPPTR			0x8
  47#define SDW_SHIM_SYNC			0xC
  48
  49#define SDW_SHIM_CTLSCAP(x)		(0x010 + 0x60 * (x))
  50#define SDW_SHIM_CTLS0CM(x)		(0x012 + 0x60 * (x))
  51#define SDW_SHIM_CTLS1CM(x)		(0x014 + 0x60 * (x))
  52#define SDW_SHIM_CTLS2CM(x)		(0x016 + 0x60 * (x))
  53#define SDW_SHIM_CTLS3CM(x)		(0x018 + 0x60 * (x))
  54#define SDW_SHIM_PCMSCAP(x)		(0x020 + 0x60 * (x))
  55
  56#define SDW_SHIM_PCMSYCHM(x, y)		(0x022 + (0x60 * (x)) + (0x2 * (y)))
  57#define SDW_SHIM_PCMSYCHC(x, y)		(0x042 + (0x60 * (x)) + (0x2 * (y)))
  58#define SDW_SHIM_PDMSCAP(x)		(0x062 + 0x60 * (x))
  59#define SDW_SHIM_IOCTL(x)		(0x06C + 0x60 * (x))
  60#define SDW_SHIM_CTMCTL(x)		(0x06E + 0x60 * (x))
  61
  62#define SDW_SHIM_WAKEEN			0x190
  63#define SDW_SHIM_WAKESTS		0x192
  64
  65#define SDW_SHIM_LCTL_SPA		BIT(0)
  66#define SDW_SHIM_LCTL_SPA_MASK		GENMASK(3, 0)
  67#define SDW_SHIM_LCTL_CPA		BIT(8)
  68#define SDW_SHIM_LCTL_CPA_MASK		GENMASK(11, 8)
  69
  70#define SDW_SHIM_SYNC_SYNCPRD_VAL_24	(24000 / SDW_CADENCE_GSYNC_KHZ - 1)
  71#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4	(38400 / SDW_CADENCE_GSYNC_KHZ - 1)
  72#define SDW_SHIM_SYNC_SYNCPRD		GENMASK(14, 0)
  73#define SDW_SHIM_SYNC_SYNCCPU		BIT(15)
  74#define SDW_SHIM_SYNC_CMDSYNC_MASK	GENMASK(19, 16)
  75#define SDW_SHIM_SYNC_CMDSYNC		BIT(16)
  76#define SDW_SHIM_SYNC_SYNCGO		BIT(24)
  77
  78#define SDW_SHIM_PCMSCAP_ISS		GENMASK(3, 0)
  79#define SDW_SHIM_PCMSCAP_OSS		GENMASK(7, 4)
  80#define SDW_SHIM_PCMSCAP_BSS		GENMASK(12, 8)
  81
  82#define SDW_SHIM_PCMSYCM_LCHN		GENMASK(3, 0)
  83#define SDW_SHIM_PCMSYCM_HCHN		GENMASK(7, 4)
  84#define SDW_SHIM_PCMSYCM_STREAM		GENMASK(13, 8)
  85#define SDW_SHIM_PCMSYCM_DIR		BIT(15)
  86
  87#define SDW_SHIM_PDMSCAP_ISS		GENMASK(3, 0)
  88#define SDW_SHIM_PDMSCAP_OSS		GENMASK(7, 4)
  89#define SDW_SHIM_PDMSCAP_BSS		GENMASK(12, 8)
  90#define SDW_SHIM_PDMSCAP_CPSS		GENMASK(15, 13)
  91
  92#define SDW_SHIM_IOCTL_MIF		BIT(0)
  93#define SDW_SHIM_IOCTL_CO		BIT(1)
  94#define SDW_SHIM_IOCTL_COE		BIT(2)
  95#define SDW_SHIM_IOCTL_DO		BIT(3)
  96#define SDW_SHIM_IOCTL_DOE		BIT(4)
  97#define SDW_SHIM_IOCTL_BKE		BIT(5)
  98#define SDW_SHIM_IOCTL_WPDD		BIT(6)
  99#define SDW_SHIM_IOCTL_CIBD		BIT(8)
 100#define SDW_SHIM_IOCTL_DIBD		BIT(9)
 101
 102#define SDW_SHIM_CTMCTL_DACTQE		BIT(0)
 103#define SDW_SHIM_CTMCTL_DODS		BIT(1)
 104#define SDW_SHIM_CTMCTL_DOAIS		GENMASK(4, 3)
 105
 106#define SDW_SHIM_WAKEEN_ENABLE		BIT(0)
 107#define SDW_SHIM_WAKESTS_STATUS		BIT(0)
 108
 109/* Intel ALH Register definitions */
 110#define SDW_ALH_STRMZCFG(x)		(0x000 + (0x4 * (x)))
 111#define SDW_ALH_NUM_STREAMS		64
 112
 113#define SDW_ALH_STRMZCFG_DMAT_VAL	0x3
 114#define SDW_ALH_STRMZCFG_DMAT		GENMASK(7, 0)
 115#define SDW_ALH_STRMZCFG_CHN		GENMASK(19, 16)
 116
 117enum intel_pdi_type {
 118	INTEL_PDI_IN = 0,
 119	INTEL_PDI_OUT = 1,
 120	INTEL_PDI_BD = 2,
 121};
 122
 123#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
 124
 125/*
 126 * Read, write helpers for HW registers
 127 */
 128static inline int intel_readl(void __iomem *base, int offset)
 129{
 130	return readl(base + offset);
 131}
 132
 133static inline void intel_writel(void __iomem *base, int offset, int value)
 134{
 135	writel(value, base + offset);
 136}
 137
 138static inline u16 intel_readw(void __iomem *base, int offset)
 139{
 140	return readw(base + offset);
 141}
 142
 143static inline void intel_writew(void __iomem *base, int offset, u16 value)
 144{
 145	writew(value, base + offset);
 146}
 147
 148static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
 149{
 150	int timeout = 10;
 151	u32 reg_read;
 152
 153	do {
 154		reg_read = readl(base + offset);
 155		if ((reg_read & mask) == target)
 156			return 0;
 157
 158		timeout--;
 159		usleep_range(50, 100);
 160	} while (timeout != 0);
 161
 162	return -EAGAIN;
 163}
 164
 165static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
 166{
 167	writel(value, base + offset);
 168	return intel_wait_bit(base, offset, mask, 0);
 169}
 170
 171static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
 172{
 173	writel(value, base + offset);
 174	return intel_wait_bit(base, offset, mask, mask);
 175}
 176
 177/*
 178 * debugfs
 179 */
 180#ifdef CONFIG_DEBUG_FS
 181
 182#define RD_BUF (2 * PAGE_SIZE)
 183
 184static ssize_t intel_sprintf(void __iomem *mem, bool l,
 185			     char *buf, size_t pos, unsigned int reg)
 186{
 187	int value;
 188
 189	if (l)
 190		value = intel_readl(mem, reg);
 191	else
 192		value = intel_readw(mem, reg);
 193
 194	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
 195}
 196
 197static int intel_reg_show(struct seq_file *s_file, void *data)
 198{
 199	struct sdw_intel *sdw = s_file->private;
 200	void __iomem *s = sdw->link_res->shim;
 201	void __iomem *a = sdw->link_res->alh;
 202	char *buf;
 203	ssize_t ret;
 204	int i, j;
 205	unsigned int links, reg;
 206
 207	buf = kzalloc(RD_BUF, GFP_KERNEL);
 208	if (!buf)
 209		return -ENOMEM;
 210
 211	links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);
 212
 213	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
 214	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
 215
 216	for (i = 0; i < links; i++) {
 217		reg = SDW_SHIM_LCAP + i * 4;
 218		ret += intel_sprintf(s, true, buf, ret, reg);
 219	}
 220
 221	for (i = 0; i < links; i++) {
 222		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
 223		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
 224		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
 225		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
 226		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
 227		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
 228		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
 229
 230		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
 231
 232		/*
 233		 * the value 10 is the number of PDIs. We will need a
 234		 * cleanup to remove hard-coded Intel configurations
 235		 * from cadence_master.c
 236		 */
 237		for (j = 0; j < 10; j++) {
 238			ret += intel_sprintf(s, false, buf, ret,
 239					SDW_SHIM_PCMSYCHM(i, j));
 240			ret += intel_sprintf(s, false, buf, ret,
 241					SDW_SHIM_PCMSYCHC(i, j));
 242		}
 243		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");
 244
 245		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
 246		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
 247		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
 248	}
 249
 250	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
 251	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
 252	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
 253
 254	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
 255	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
 256		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
 257
 258	seq_printf(s_file, "%s", buf);
 259	kfree(buf);
 260
 261	return 0;
 262}
 263DEFINE_SHOW_ATTRIBUTE(intel_reg);
 264
 265static int intel_set_m_datamode(void *data, u64 value)
 266{
 267	struct sdw_intel *sdw = data;
 268	struct sdw_bus *bus = &sdw->cdns.bus;
 269
 270	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 271		return -EINVAL;
 272
 273	/* Userspace changed the hardware state behind the kernel's back */
 274	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 275
 276	bus->params.m_data_mode = value;
 277
 278	return 0;
 279}
 280DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
 281			 intel_set_m_datamode, "%llu\n");
 282
 283static int intel_set_s_datamode(void *data, u64 value)
 284{
 285	struct sdw_intel *sdw = data;
 286	struct sdw_bus *bus = &sdw->cdns.bus;
 287
 288	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 289		return -EINVAL;
 290
 291	/* Userspace changed the hardware state behind the kernel's back */
 292	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 293
 294	bus->params.s_data_mode = value;
 295
 296	return 0;
 297}
 298DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
 299			 intel_set_s_datamode, "%llu\n");
 300
 301static void intel_debugfs_init(struct sdw_intel *sdw)
 302{
 303	struct dentry *root = sdw->cdns.bus.debugfs;
 304
 305	if (!root)
 306		return;
 307
 308	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
 309
 310	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
 311			    &intel_reg_fops);
 312
 313	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
 314			    &intel_set_m_datamode_fops);
 315
 316	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
 317			    &intel_set_s_datamode_fops);
 318
 319	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
 320}
 321
 322static void intel_debugfs_exit(struct sdw_intel *sdw)
 323{
 324	debugfs_remove_recursive(sdw->debugfs);
 325}
 326#else
 327static void intel_debugfs_init(struct sdw_intel *sdw) {}
 328static void intel_debugfs_exit(struct sdw_intel *sdw) {}
 329#endif /* CONFIG_DEBUG_FS */
 330
 331/*
 332 * shim ops
 333 */
 334
 335static int intel_link_power_up(struct sdw_intel *sdw)
 336{
 337	unsigned int link_id = sdw->instance;
 338	void __iomem *shim = sdw->link_res->shim;
 339	u32 *shim_mask = sdw->link_res->shim_mask;
 340	struct sdw_bus *bus = &sdw->cdns.bus;
 341	struct sdw_master_prop *prop = &bus->prop;
 342	u32 spa_mask, cpa_mask;
 343	u32 link_control;
 344	int ret = 0;
 345	u32 syncprd;
 346	u32 sync_reg;
 347
 348	mutex_lock(sdw->link_res->shim_lock);
 349
 350	/*
 351	 * The hardware relies on an internal counter, typically 4kHz,
 352	 * to generate the SoundWire SSP - which defines a 'safe'
 353	 * synchronization point between commands and audio transport
 354	 * and allows for multi link synchronization. The SYNCPRD value
 355	 * is only dependent on the oscillator clock provided to
 356	 * the IP, so adjust based on _DSD properties reported in DSDT
 357	 * tables. The values reported are based on either 24MHz
 358	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
 359	 */
 360	if (prop->mclk_freq % 6000000)
 361		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
 362	else
 363		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
 364
 365	if (!*shim_mask) {
 366		dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
 367
 368		/* we first need to program the SyncPRD/CPU registers */
 369		dev_dbg(sdw->cdns.dev,
 370			"%s: first link up, programming SYNCPRD\n", __func__);
 371
 372		/* set SyncPRD period */
 373		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 374		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
 375
 376		/* Set SyncCPU bit */
 377		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
 378		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 379
 380		/* Link power up sequence */
 381		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 382
 383		/* only power-up enabled links */
 384		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
 385		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 386
 387		link_control |=  spa_mask;
 388
 389		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 390		if (ret < 0) {
 391			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
 392			goto out;
 393		}
 394
 395		/* SyncCPU will change once link is active */
 396		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
 397				     SDW_SHIM_SYNC_SYNCCPU, 0);
 398		if (ret < 0) {
 399			dev_err(sdw->cdns.dev,
 400				"Failed to set SHIM_SYNC: %d\n", ret);
 401			goto out;
 402		}
 403	}
 404
 405	*shim_mask |= BIT(link_id);
 406
 407	sdw->cdns.link_up = true;
 408out:
 409	mutex_unlock(sdw->link_res->shim_lock);
 410
 411	return ret;
 412}
 413
 414/* this needs to be called with shim_lock */
 415static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
 416{
 417	void __iomem *shim = sdw->link_res->shim;
 418	unsigned int link_id = sdw->instance;
 419	u16 ioctl;
 420
 421	/* Switch to MIP from Glue logic */
 422	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
 423
 424	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
 425	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 426	usleep_range(10, 15);
 427
 428	ioctl &= ~(SDW_SHIM_IOCTL_DO);
 429	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 430	usleep_range(10, 15);
 431
 432	ioctl |= (SDW_SHIM_IOCTL_MIF);
 433	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 434	usleep_range(10, 15);
 435
 436	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
 437	ioctl &= ~(SDW_SHIM_IOCTL_COE);
 438	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 439	usleep_range(10, 15);
 440
 441	/* at this point Master IP has full control of the I/Os */
 442}
 443
 444/* this needs to be called with shim_lock */
 445static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
 446{
 447	unsigned int link_id = sdw->instance;
 448	void __iomem *shim = sdw->link_res->shim;
 449	u16 ioctl;
 450
 451	/* Glue logic */
 452	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
 453	ioctl |= SDW_SHIM_IOCTL_BKE;
 454	ioctl |= SDW_SHIM_IOCTL_COE;
 455	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 456	usleep_range(10, 15);
 457
 458	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
 459	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 460	usleep_range(10, 15);
 461
 462	/* at this point Integration Glue has full control of the I/Os */
 463}
 464
 465static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
 
 466{
 467	void __iomem *shim = sdw->link_res->shim;
 468	unsigned int link_id = sdw->instance;
 469	int ret = 0;
 470	u16 ioctl = 0, act = 0;
 471
 472	mutex_lock(sdw->link_res->shim_lock);
 473
 474	/* Initialize Shim */
 475	ioctl |= SDW_SHIM_IOCTL_BKE;
 476	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 477	usleep_range(10, 15);
 478
 479	ioctl |= SDW_SHIM_IOCTL_WPDD;
 480	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 481	usleep_range(10, 15);
 482
 483	ioctl |= SDW_SHIM_IOCTL_DO;
 484	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 485	usleep_range(10, 15);
 486
 487	ioctl |= SDW_SHIM_IOCTL_DOE;
 488	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 489	usleep_range(10, 15);
 490
 491	intel_shim_glue_to_master_ip(sdw);
 492
 
 493	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
 494	act |= SDW_SHIM_CTMCTL_DACTQE;
 495	act |= SDW_SHIM_CTMCTL_DODS;
 496	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
 497	usleep_range(10, 15);
 
 
 
 
 
 
 498
 499	mutex_unlock(sdw->link_res->shim_lock);
 
 500
 501	return ret;
 502}
 503
 504static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
 505{
 506	void __iomem *shim = sdw->link_res->shim;
 507	unsigned int link_id = sdw->instance;
 508	u16 wake_en, wake_sts;
 509
 510	mutex_lock(sdw->link_res->shim_lock);
 511	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
 512
 513	if (wake_enable) {
 514		/* Enable the wakeup */
 515		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 516		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 517	} else {
 518		/* Disable the wake up interrupt */
 519		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
 520		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 521
 522		/* Clear wake status */
 523		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 524		wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 525		intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts);
 526	}
 527	mutex_unlock(sdw->link_res->shim_lock);
 528}
 529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 530static int intel_link_power_down(struct sdw_intel *sdw)
 531{
 532	u32 link_control, spa_mask, cpa_mask;
 533	unsigned int link_id = sdw->instance;
 534	void __iomem *shim = sdw->link_res->shim;
 535	u32 *shim_mask = sdw->link_res->shim_mask;
 536	int ret = 0;
 537
 538	mutex_lock(sdw->link_res->shim_lock);
 539
 540	if (!(*shim_mask & BIT(link_id)))
 541		dev_err(sdw->cdns.dev,
 542			"%s: Unbalanced power-up/down calls\n", __func__);
 543
 544	sdw->cdns.link_up = false;
 545
 546	intel_shim_master_ip_to_glue(sdw);
 547
 548	*shim_mask &= ~BIT(link_id);
 549
 550	if (!*shim_mask) {
 551
 552		dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
 553
 554		/* Link power down sequence */
 555		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 556
 557		/* only power-down enabled links */
 558		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
 559		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 560
 561		link_control &=  spa_mask;
 562
 563		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 564		if (ret < 0) {
 565			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
 566
 567			/*
 568			 * we leave the sdw->cdns.link_up flag as false since we've disabled
 569			 * the link at this point and cannot handle interrupts any longer.
 570			 */
 571		}
 572	}
 573
 574	mutex_unlock(sdw->link_res->shim_lock);
 575
 576	return ret;
 577}
 578
 579static void intel_shim_sync_arm(struct sdw_intel *sdw)
 580{
 581	void __iomem *shim = sdw->link_res->shim;
 582	u32 sync_reg;
 583
 584	mutex_lock(sdw->link_res->shim_lock);
 585
 586	/* update SYNC register */
 587	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 588	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
 589	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 590
 591	mutex_unlock(sdw->link_res->shim_lock);
 592}
 593
 594static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
 595{
 596	void __iomem *shim = sdw->link_res->shim;
 597	u32 sync_reg;
 598	int ret;
 599
 600	/* Read SYNC register */
 601	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 602
 603	/*
 604	 * Set SyncGO bit to synchronously trigger a bank switch for
 605	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
 606	 * the Masters.
 607	 */
 608	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
 609
 610	ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
 611			      SDW_SHIM_SYNC_SYNCGO);
 612
 613	if (ret < 0)
 614		dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
 615
 616	return ret;
 617}
 618
 619static int intel_shim_sync_go(struct sdw_intel *sdw)
 620{
 621	int ret;
 622
 623	mutex_lock(sdw->link_res->shim_lock);
 624
 625	ret = intel_shim_sync_go_unlocked(sdw);
 626
 627	mutex_unlock(sdw->link_res->shim_lock);
 628
 629	return ret;
 630}
 631
 632/*
 633 * PDI routines
 634 */
 635static void intel_pdi_init(struct sdw_intel *sdw,
 636			   struct sdw_cdns_stream_config *config)
 637{
 638	void __iomem *shim = sdw->link_res->shim;
 639	unsigned int link_id = sdw->instance;
 640	int pcm_cap, pdm_cap;
 641
 642	/* PCM Stream Capability */
 643	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
 644
 645	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
 646	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
 647	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
 648
 649	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
 650		config->pcm_bd, config->pcm_in, config->pcm_out);
 651
 652	/* PDM Stream Capability */
 653	pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
 654
 655	config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
 656	config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
 657	config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);
 658
 659	dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
 660		config->pdm_bd, config->pdm_in, config->pdm_out);
 661}
 662
 663static int
 664intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
 665{
 666	void __iomem *shim = sdw->link_res->shim;
 667	unsigned int link_id = sdw->instance;
 668	int count;
 669
 670	if (pcm) {
 671		count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
 672
 673		/*
 674		 * WORKAROUND: on all existing Intel controllers, PDI
 675		 * number 2 reports a channel count of 1 even though it
 676		 * supports 8 channels. Hardcode the count for PDI number 2
 677		 * (7 becomes 8 after the zero-based adjustment below).
 678		 */
 679		if (pdi_num == 2)
 680			count = 7;
 681
 682	} else {
 683		count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
 684		count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
 685	}
 686
 687	/* zero based values for channel count in register */
 688	count++;
 689
 690	return count;
 691}
 692
 693static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
 694				   struct sdw_cdns_pdi *pdi,
 695				   unsigned int num_pdi,
 696				   unsigned int *num_ch, bool pcm)
 697{
 698	int i, ch_count = 0;
 699
 700	for (i = 0; i < num_pdi; i++) {
 701		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm);
 702		ch_count += pdi->ch_count;
 703		pdi++;
 704	}
 705
 706	*num_ch = ch_count;
 707	return 0;
 708}
 709
 710static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
 711				      struct sdw_cdns_streams *stream, bool pcm)
 712{
 713	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
 714				&stream->num_ch_bd, pcm);
 715
 716	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
 717				&stream->num_ch_in, pcm);
 718
 719	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
 720				&stream->num_ch_out, pcm);
 721
 722	return 0;
 723}
 724
 725static int intel_pdi_ch_update(struct sdw_intel *sdw)
 726{
 727	/* First update PCM streams followed by PDM streams */
 728	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true);
 729	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false);
 730
 731	return 0;
 732}
 733
 734static void
 735intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 736{
 737	void __iomem *shim = sdw->link_res->shim;
 738	unsigned int link_id = sdw->instance;
 739	int pdi_conf = 0;
 740
 741	/* the Bulk and PCM streams are not contiguous */
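	/*
	 * Example of the mapping below: link 0, PDI 0 maps to ALH stream 3;
	 * link 0, PDI 2 maps to stream 7 (the extra offset of 2 applies from
	 * PDI 2 onwards); link 1, PDI 0 maps to stream 19.
	 */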
 742	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 743	if (pdi->num >= 2)
 744		pdi->intel_alh_id += 2;
 745
 746	/*
 747	 * Program the stream parameters in the stream SHIM register.
 748	 * This is applicable to PCM streams only.
 749	 */
 750	if (pdi->type != SDW_STREAM_PCM)
 751		return;
 752
 753	if (pdi->dir == SDW_DATA_DIR_RX)
 754		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
 755	else
 756		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
 757
 758	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
 759	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
 760	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
 761
 762	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
 763}
 764
 765static void
 766intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 767{
 768	void __iomem *alh = sdw->link_res->alh;
 769	unsigned int link_id = sdw->instance;
 770	unsigned int conf;
 771
 772	/* the Bulk and PCM streams are not contiguous */
 773	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 774	if (pdi->num >= 2)
 775		pdi->intel_alh_id += 2;
 776
 777	/* Program Stream config ALH register */
 778	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
 779
 780	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
 781	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
 782
 783	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
 784}
 785
 786static int intel_params_stream(struct sdw_intel *sdw,
 787			       struct snd_pcm_substream *substream,
 788			       struct snd_soc_dai *dai,
 789			       struct snd_pcm_hw_params *hw_params,
 790			       int link_id, int alh_stream_id)
 791{
 792	struct sdw_intel_link_res *res = sdw->link_res;
 793	struct sdw_intel_stream_params_data params_data;
 794
 795	params_data.substream = substream;
 796	params_data.dai = dai;
 797	params_data.hw_params = hw_params;
 798	params_data.link_id = link_id;
 799	params_data.alh_stream_id = alh_stream_id;
 800
 801	if (res->ops && res->ops->params_stream && res->dev)
 802		return res->ops->params_stream(res->dev,
 803					       &params_data);
 804	return -EIO;
 805}
 806
 807static int intel_free_stream(struct sdw_intel *sdw,
 808			     struct snd_pcm_substream *substream,
 809			     struct snd_soc_dai *dai,
 810			     int link_id)
 811{
 812	struct sdw_intel_link_res *res = sdw->link_res;
 813	struct sdw_intel_stream_free_data free_data;
 814
 815	free_data.substream = substream;
 816	free_data.dai = dai;
 817	free_data.link_id = link_id;
 818
 819	if (res->ops && res->ops->free_stream && res->dev)
 820		return res->ops->free_stream(res->dev,
 821					     &free_data);
 822
 823	return 0;
 824}
 825
 826/*
 827 * bank switch routines
 828 */
 829
 830static int intel_pre_bank_switch(struct sdw_bus *bus)
 831{
 832	struct sdw_cdns *cdns = bus_to_cdns(bus);
 833	struct sdw_intel *sdw = cdns_to_intel(cdns);
 834
 835	/* Write to register only for multi-link */
 836	if (!bus->multi_link)
 837		return 0;
 838
 839	intel_shim_sync_arm(sdw);
 840
 841	return 0;
 842}
 843
 844static int intel_post_bank_switch(struct sdw_bus *bus)
 845{
 846	struct sdw_cdns *cdns = bus_to_cdns(bus);
 847	struct sdw_intel *sdw = cdns_to_intel(cdns);
 848	void __iomem *shim = sdw->link_res->shim;
 849	int sync_reg, ret;
 850
 851	/* Write to register only for multi-link */
 852	if (!bus->multi_link)
 853		return 0;
 854
 855	mutex_lock(sdw->link_res->shim_lock);
 856
 857	/* Read SYNC register */
 858	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 859
 860	/*
 861	 * The post_bank_switch() ops is called from the bus in a loop for
 862	 * all the Masters in the stream, with the expectation that we
 863	 * trigger the bank switch only for the first Master in the list
 864	 * and do nothing for the other Masters.
 865	 *
 866	 * So, set the SYNCGO bit only if the CMDSYNC bit is set for any Master.
 867	 */
 868	if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
 869		ret = 0;
 870		goto unlock;
 871	}
 872
 873	ret = intel_shim_sync_go_unlocked(sdw);
 874unlock:
 875	mutex_unlock(sdw->link_res->shim_lock);
 876
 877	if (ret < 0)
 878		dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
 879
 880	return ret;
 881}
 882
 883/*
 884 * DAI routines
 885 */
 886
 887static int intel_startup(struct snd_pcm_substream *substream,
 888			 struct snd_soc_dai *dai)
 889{
 890	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 891	int ret;
 892
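	/*
	 * -EACCES is returned when runtime PM is disabled for this device;
	 * treat it as success so the DAI can still be used.
	 */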
 893	ret = pm_runtime_get_sync(cdns->dev);
 894	if (ret < 0 && ret != -EACCES) {
 895		dev_err_ratelimited(cdns->dev,
 896				    "pm_runtime_get_sync failed in %s, ret %d\n",
 897				    __func__, ret);
 898		pm_runtime_put_noidle(cdns->dev);
 899		return ret;
 900	}
 901	return 0;
 902}
 903
 904static int intel_hw_params(struct snd_pcm_substream *substream,
 905			   struct snd_pcm_hw_params *params,
 906			   struct snd_soc_dai *dai)
 907{
 908	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 909	struct sdw_intel *sdw = cdns_to_intel(cdns);
 910	struct sdw_cdns_dma_data *dma;
 911	struct sdw_cdns_pdi *pdi;
 912	struct sdw_stream_config sconfig;
 913	struct sdw_port_config *pconfig;
 914	int ch, dir;
 915	int ret;
 916	bool pcm = true;
 917
 918	dma = snd_soc_dai_get_dma_data(dai, substream);
 919	if (!dma)
 920		return -EIO;
 921
 922	ch = params_channels(params);
 923	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 924		dir = SDW_DATA_DIR_RX;
 925	else
 926		dir = SDW_DATA_DIR_TX;
 927
 928	if (dma->stream_type == SDW_STREAM_PDM)
 929		pcm = false;
 930
 931	if (pcm)
 932		pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
 933	else
 934		pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
 935
 936	if (!pdi) {
 937		ret = -EINVAL;
 938		goto error;
 939	}
 940
 941	/* do run-time configurations for SHIM, ALH and PDI/PORT */
 942	intel_pdi_shim_configure(sdw, pdi);
 943	intel_pdi_alh_configure(sdw, pdi);
 944	sdw_cdns_config_stream(cdns, ch, dir, pdi);
 945
 946	/* store pdi and hw_params, may be needed in prepare step */
 947	dma->suspended = false;
 948	dma->pdi = pdi;
 949	dma->hw_params = params;
 950
 951	/* Inform DSP about PDI stream number */
 952	ret = intel_params_stream(sdw, substream, dai, params,
 953				  sdw->instance,
 954				  pdi->intel_alh_id);
 955	if (ret)
 956		goto error;
 957
 958	sconfig.direction = dir;
 959	sconfig.ch_count = ch;
 960	sconfig.frame_rate = params_rate(params);
 961	sconfig.type = dma->stream_type;
 962
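	/* PDM streams are configured as 1-bit samples at 50x the PCM frame rate */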
 963	if (dma->stream_type == SDW_STREAM_PDM) {
 964		sconfig.frame_rate *= 50;
 965		sconfig.bps = 1;
 966	} else {
 967		sconfig.bps = snd_pcm_format_width(params_format(params));
 968	}
 969
 970	/* Port configuration */
 971	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
 972	if (!pconfig) {
 973		ret = -ENOMEM;
 974		goto error;
 975	}
 976
 977	pconfig->num = pdi->num;
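	/* contiguous mask of the 'ch' lowest channels, e.g. ch = 2 -> 0x3 */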
 978	pconfig->ch_mask = (1 << ch) - 1;
 979
 980	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
 981				    pconfig, 1, dma->stream);
 982	if (ret)
 983		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
 984
 985	kfree(pconfig);
 986error:
 987	return ret;
 988}
 989
 990static int intel_prepare(struct snd_pcm_substream *substream,
 991			 struct snd_soc_dai *dai)
 992{
 993	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 994	struct sdw_intel *sdw = cdns_to_intel(cdns);
 995	struct sdw_cdns_dma_data *dma;
 996	int ch, dir;
 997	int ret = 0;
 998
 999	dma = snd_soc_dai_get_dma_data(dai, substream);
1000	if (!dma) {
1001		dev_err(dai->dev, "failed to get dma data in %s\n",
1002			__func__);
1003		return -EIO;
1004	}
1005
1006	if (dma->suspended) {
1007		dma->suspended = false;
1008
1009		/*
1010		 * .prepare() is called after system resume, where we
1011		 * need to reinitialize the SHIM/ALH/Cadence IP.
1012		 * .prepare() is also called to deal with underflows,
1013		 * but in those cases we cannot touch ALH/SHIM
1014		 * registers.
1015		 */
1016
1017		/* configure stream */
1018		ch = params_channels(dma->hw_params);
1019		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
1020			dir = SDW_DATA_DIR_RX;
1021		else
1022			dir = SDW_DATA_DIR_TX;
1023
1024		intel_pdi_shim_configure(sdw, dma->pdi);
1025		intel_pdi_alh_configure(sdw, dma->pdi);
1026		sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
1027
1028		/* Inform DSP about PDI stream number */
1029		ret = intel_params_stream(sdw, substream, dai,
1030					  dma->hw_params,
1031					  sdw->instance,
1032					  dma->pdi->intel_alh_id);
1033	}
1034
1035	return ret;
1036}
1037
1038static int
1039intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
1040{
1041	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1042	struct sdw_intel *sdw = cdns_to_intel(cdns);
1043	struct sdw_cdns_dma_data *dma;
1044	int ret;
1045
1046	dma = snd_soc_dai_get_dma_data(dai, substream);
1047	if (!dma)
1048		return -EIO;
1049
1050	/*
1051	 * The sdw stream state will transition to RELEASED when stream->
1052	 * master_list is empty. So the stream state will transition to
1053	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
1054	 * cpu-dai.
1055	 */
1056	ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
1057	if (ret < 0) {
1058		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
1059			dma->stream->name, ret);
1060		return ret;
1061	}
1062
1063	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
1064	if (ret < 0) {
1065		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
1066		return ret;
1067	}
1068
1069	dma->hw_params = NULL;
1070	dma->pdi = NULL;
1071
1072	return 0;
1073}
1074
1075static void intel_shutdown(struct snd_pcm_substream *substream,
1076			   struct snd_soc_dai *dai)
1077{
1078	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1079
1080	pm_runtime_mark_last_busy(cdns->dev);
1081	pm_runtime_put_autosuspend(cdns->dev);
1082}
1083
1084static int intel_component_dais_suspend(struct snd_soc_component *component)
1085{
1086	struct sdw_cdns_dma_data *dma;
1087	struct snd_soc_dai *dai;
1088
1089	for_each_component_dais(component, dai) {
1090		/*
1091		 * we don't have a .suspend dai_ops, and we don't have access
1092		 * to the substream, so let's mark both capture and playback
1093		 * DMA contexts as suspended
1094		 */
1095		dma = dai->playback_dma_data;
1096		if (dma)
1097			dma->suspended = true;
1098
1099		dma = dai->capture_dma_data;
1100		if (dma)
1101			dma->suspended = true;
1102	}
1103
1104	return 0;
1105}
1106
1107static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
1108				    void *stream, int direction)
1109{
1110	return cdns_set_sdw_stream(dai, stream, true, direction);
1111}
1112
1113static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
1114				    void *stream, int direction)
1115{
1116	return cdns_set_sdw_stream(dai, stream, false, direction);
1117}
1118
1119static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
1120				  int direction)
1121{
1122	struct sdw_cdns_dma_data *dma;
1123
1124	if (direction == SNDRV_PCM_STREAM_PLAYBACK)
1125		dma = dai->playback_dma_data;
1126	else
1127		dma = dai->capture_dma_data;
1128
1129	if (!dma)
1130		return ERR_PTR(-EINVAL);
1131
1132	return dma->stream;
1133}
1134
1135static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1136	.startup = intel_startup,
1137	.hw_params = intel_hw_params,
1138	.prepare = intel_prepare,
1139	.hw_free = intel_hw_free,
1140	.shutdown = intel_shutdown,
1141	.set_sdw_stream = intel_pcm_set_sdw_stream,
1142	.get_sdw_stream = intel_get_sdw_stream,
1143};
1144
1145static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
1146	.startup = intel_startup,
1147	.hw_params = intel_hw_params,
1148	.prepare = intel_prepare,
1149	.hw_free = intel_hw_free,
1150	.shutdown = intel_shutdown,
1151	.set_sdw_stream = intel_pdm_set_sdw_stream,
1152	.get_sdw_stream = intel_get_sdw_stream,
1153};
1154
1155static const struct snd_soc_component_driver dai_component = {
1156	.name           = "soundwire",
1157	.suspend	= intel_component_dais_suspend,
1158};
1159
1160static int intel_create_dai(struct sdw_cdns *cdns,
1161			    struct snd_soc_dai_driver *dais,
1162			    enum intel_pdi_type type,
1163			    u32 num, u32 off, u32 max_ch, bool pcm)
1164{
1165	int i;
1166
1167	if (num == 0)
1168		return 0;
1169
1170	/* TODO: Read supported rates/formats from hardware */
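	/*
	 * DAI names follow the "SDW%d Pin%d" pattern, e.g. instance 0 and
	 * index 2 yield "SDW0 Pin2"
	 */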
1171	for (i = off; i < (off + num); i++) {
1172		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1173					      "SDW%d Pin%d",
1174					      cdns->instance, i);
1175		if (!dais[i].name)
1176			return -ENOMEM;
1177
1178		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1179			dais[i].playback.channels_min = 1;
1180			dais[i].playback.channels_max = max_ch;
1181			dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1182			dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1183		}
1184
1185		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1186			dais[i].capture.channels_min = 1;
1187			dais[i].capture.channels_max = max_ch;
1188			dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1189			dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1190		}
1191
1192		if (pcm)
1193			dais[i].ops = &intel_pcm_dai_ops;
1194		else
1195			dais[i].ops = &intel_pdm_dai_ops;
1196	}
1197
1198	return 0;
1199}
1200
1201static int intel_register_dai(struct sdw_intel *sdw)
1202{
1203	struct sdw_cdns *cdns = &sdw->cdns;
1204	struct sdw_cdns_streams *stream;
1205	struct snd_soc_dai_driver *dais;
1206	int num_dai, ret, off = 0;
1207
1208	/* DAIs are created based on total number of PDIs supported */
1209	num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;
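	/*
	 * PCM in/out/bidirectional DAIs are created first, followed by the
	 * PDM DAIs, with 'off' tracking the running offset into dais[]
	 */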
1210
1211	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1212	if (!dais)
1213		return -ENOMEM;
1214
1215	/* Create PCM DAIs */
1216	stream = &cdns->pcm;
1217
1218	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1219			       off, stream->num_ch_in, true);
1220	if (ret)
1221		return ret;
1222
1223	off += cdns->pcm.num_in;
1224	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1225			       off, stream->num_ch_out, true);
1226	if (ret)
1227		return ret;
1228
1229	off += cdns->pcm.num_out;
1230	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1231			       off, stream->num_ch_bd, true);
1232	if (ret)
1233		return ret;
1234
1235	/* Create PDM DAIs */
1236	stream = &cdns->pdm;
1237	off += cdns->pcm.num_bd;
1238	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
1239			       off, stream->num_ch_in, false);
1240	if (ret)
1241		return ret;
1242
1243	off += cdns->pdm.num_in;
1244	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
1245			       off, stream->num_ch_out, false);
1246	if (ret)
1247		return ret;
1248
1249	off += cdns->pdm.num_out;
1250	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
1251			       off, stream->num_ch_bd, false);
1252	if (ret)
1253		return ret;
1254
1255	return snd_soc_register_component(cdns->dev, &dai_component,
1256					  dais, num_dai);
1257}
1258
1259static int sdw_master_read_intel_prop(struct sdw_bus *bus)
1260{
1261	struct sdw_master_prop *prop = &bus->prop;
1262	struct fwnode_handle *link;
1263	char name[32];
1264	u32 quirk_mask;
1265
1266	/* Find master handle */
1267	snprintf(name, sizeof(name),
1268		 "mipi-sdw-link-%d-subproperties", bus->link_id);
1269
1270	link = device_get_named_child_node(bus->dev, name);
1271	if (!link) {
1272		dev_err(bus->dev, "Master node %s not found\n", name);
1273		return -EIO;
1274	}
1275
1276	fwnode_property_read_u32(link,
1277				 "intel-sdw-ip-clock",
1278				 &prop->mclk_freq);
1279
1280	/* the values reported by BIOS are the 2x clock, not the bus clock */
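	/* e.g. a BIOS-reported 9.6 MHz ip clock results in a 4.8 MHz bus clock */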
1281	prop->mclk_freq /= 2;
1282
1283	fwnode_property_read_u32(link,
1284				 "intel-quirk-mask",
1285				 &quirk_mask);
1286
1287	if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
1288		prop->hw_disabled = true;
1289
1290	prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
1291		SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
1292
1293	return 0;
1294}
1295
1296static int intel_prop_read(struct sdw_bus *bus)
1297{
1298	/* Initialize with default handler to read all DisCo properties */
1299	sdw_master_read_prop(bus);
1300
1301	/* read Intel-specific properties */
1302	sdw_master_read_intel_prop(bus);
1303
1304	return 0;
1305}
1306
1307static struct sdw_master_ops sdw_intel_ops = {
1308	.read_prop = sdw_master_read_prop,
1309	.override_adr = sdw_dmi_override_adr,
1310	.xfer_msg = cdns_xfer_msg,
1311	.xfer_msg_defer = cdns_xfer_msg_defer,
1312	.reset_page_addr = cdns_reset_page_addr,
1313	.set_bus_conf = cdns_bus_conf,
1314	.pre_bank_switch = intel_pre_bank_switch,
1315	.post_bank_switch = intel_post_bank_switch,
1316};
1317
1318static int intel_init(struct sdw_intel *sdw)
1319{
1320	bool clock_stop;
1321
1322	/* Initialize shim and controller */
1323	intel_link_power_up(sdw);
1324
1325	clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
1326
1327	intel_shim_init(sdw, clock_stop);
1328
1329	return 0;
1330}
1331
1332/*
1333 * probe and init (aux_dev_id argument is required by function prototype but not used)
1334 */
1335static int intel_link_probe(struct auxiliary_device *auxdev,
1336			    const struct auxiliary_device_id *aux_dev_id)
1338{
1339	struct device *dev = &auxdev->dev;
1340	struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
1341	struct sdw_intel *sdw;
1342	struct sdw_cdns *cdns;
1343	struct sdw_bus *bus;
1344	int ret;
1345
1346	sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
1347	if (!sdw)
1348		return -ENOMEM;
1349
1350	cdns = &sdw->cdns;
1351	bus = &cdns->bus;
1352
1353	sdw->instance = auxdev->id;
1354	sdw->link_res = &ldev->link_res;
1355	cdns->dev = dev;
1356	cdns->registers = sdw->link_res->registers;
1357	cdns->instance = sdw->instance;
1358	cdns->msg_count = 0;
1359
1360	bus->link_id = auxdev->id;
1361
1362	sdw_cdns_probe(cdns);
1363
1364	/* Set property read ops */
1365	sdw_intel_ops.read_prop = intel_prop_read;
1366	bus->ops = &sdw_intel_ops;
1367
1368	/* set driver data, accessed by snd_soc_dai_get_drvdata() */
1369	dev_set_drvdata(dev, cdns);
1370
1371	/* use generic bandwidth allocation algorithm */
1372	sdw->cdns.bus.compute_params = sdw_compute_params;
1373
1374	ret = sdw_bus_master_add(bus, dev, dev->fwnode);
1375	if (ret) {
1376		dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
1377		return ret;
1378	}
1379
1380	if (bus->prop.hw_disabled)
1381		dev_info(dev,
1382			 "SoundWire master %d is disabled, will be ignored\n",
1383			 bus->link_id);
1384	/*
1385	 * Ignore BIOS err_threshold, it's a really bad idea when dealing
1386	 * with multiple hardware synchronized links
1387	 */
1388	bus->prop.err_threshold = 0;
1389
1390	return 0;
1391}
1392
1393int intel_link_startup(struct auxiliary_device *auxdev)
1394{
1395	struct sdw_cdns_stream_config config;
1396	struct device *dev = &auxdev->dev;
1397	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1398	struct sdw_intel *sdw = cdns_to_intel(cdns);
1399	struct sdw_bus *bus = &cdns->bus;
1400	int link_flags;
1401	bool multi_link;
1402	u32 clock_stop_quirks;
1403	int ret;
1404
1405	if (bus->prop.hw_disabled) {
1406		dev_info(dev,
1407			 "SoundWire master %d is disabled, ignoring\n",
1408			 sdw->instance);
1409		return 0;
1410	}
1411
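	/*
	 * md_flags packs one byte of SDW_INTEL_MASTER_* flags per link;
	 * the shift selects the byte for this link
	 */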
1412	link_flags = md_flags >> (bus->link_id * 8);
1413	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1414	if (!multi_link) {
1415		dev_dbg(dev, "Multi-link is disabled\n");
1416		bus->multi_link = false;
1417	} else {
1418		/*
1419		 * hardware-based synchronization is required regardless
1420		 * of the number of segments used by a stream: SSP-based
1421		 * synchronization is gated by gsync when the multi-master
1422		 * mode is set.
1423		 */
1424		bus->multi_link = true;
1425		bus->hw_sync_min_links = 1;
1426	}
1427
1428	/* Initialize shim, controller */
1429	ret = intel_init(sdw);
1430	if (ret)
1431		goto err_init;
1432
1433	/* Read the PDI config and initialize cadence PDI */
1434	intel_pdi_init(sdw, &config);
1435	ret = sdw_cdns_pdi_init(cdns, config);
1436	if (ret)
1437		goto err_init;
1438
1439	intel_pdi_ch_update(sdw);
1440
1441	ret = sdw_cdns_enable_interrupt(cdns, true);
1442	if (ret < 0) {
1443		dev_err(dev, "cannot enable interrupts\n");
1444		goto err_init;
1445	}
1446
1447	/*
1448	 * follow recommended programming flows to avoid timeouts when
1449	 * gsync is enabled
1450	 */
1451	if (multi_link)
1452		intel_shim_sync_arm(sdw);
1453
1454	ret = sdw_cdns_init(cdns);
1455	if (ret < 0) {
1456		dev_err(dev, "unable to initialize Cadence IP\n");
1457		goto err_interrupt;
1458	}
1459
1460	ret = sdw_cdns_exit_reset(cdns);
1461	if (ret < 0) {
1462		dev_err(dev, "unable to exit bus reset sequence\n");
1463		goto err_interrupt;
1464	}
1465
1466	if (multi_link) {
1467		ret = intel_shim_sync_go(sdw);
1468		if (ret < 0) {
1469			dev_err(dev, "sync go failed: %d\n", ret);
1470			goto err_interrupt;
1471		}
1472	}
1473
1474	/* Register DAIs */
1475	ret = intel_register_dai(sdw);
1476	if (ret) {
1477		dev_err(dev, "DAI registration failed: %d\n", ret);
1478		snd_soc_unregister_component(dev);
1479		goto err_interrupt;
1480	}
1481
1482	intel_debugfs_init(sdw);
1483
1484	/* Enable runtime PM */
1485	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
1486		pm_runtime_set_autosuspend_delay(dev,
1487						 INTEL_MASTER_SUSPEND_DELAY_MS);
1488		pm_runtime_use_autosuspend(dev);
1489		pm_runtime_mark_last_busy(dev);
1490
1491		pm_runtime_set_active(dev);
1492		pm_runtime_enable(dev);
1493	}
1494
1495	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1496	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
1497		/*
1498		 * To keep the clock running we need to prevent
1499		 * pm_runtime suspend from happening by increasing the
1500		 * reference count.
1501		 * This quirk is specified by the parent PCI device in
1502		 * case of specific latency requirements. It will have
1503		 * no effect if pm_runtime is disabled by the user via
1504		 * a module parameter for testing purposes.
1505		 */
1506		pm_runtime_get_noresume(dev);
1507	}
1508
1509	/*
1510	 * The runtime PM status of Slave devices is "Unsupported"
1511	 * until they report as ATTACHED. If they don't, e.g. because
1512	 * there are no Slave devices populated or if the power-on is
1513	 * delayed or dependent on a power switch, the Master will
1514	 * remain active and prevent its parent from suspending.
1515	 *
1516	 * Conditionally force the pm_runtime core to re-evaluate the
1517	 * Master status in the absence of any Slave activity. A quirk
1518	 * is provided to e.g. deal with Slaves that may be powered on
1519	 * with a delay. A more complete solution would require the
1520	 * definition of Master properties.
1521	 */
1522	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1523		pm_runtime_idle(dev);
1524
1525	return 0;
1526
1527err_interrupt:
1528	sdw_cdns_enable_interrupt(cdns, false);
1529err_init:
1530	return ret;
1531}
1532
1533static void intel_link_remove(struct auxiliary_device *auxdev)
1534{
1535	struct device *dev = &auxdev->dev;
1536	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1537	struct sdw_intel *sdw = cdns_to_intel(cdns);
1538	struct sdw_bus *bus = &cdns->bus;
1539
1540	/*
1541	 * Since pm_runtime is already disabled, we don't decrease
1542	 * the refcount when the clock_stop_quirk is
1543	 * SDW_INTEL_CLK_STOP_NOT_ALLOWED
1544	 */
1545	if (!bus->prop.hw_disabled) {
1546		intel_debugfs_exit(sdw);
1547		sdw_cdns_enable_interrupt(cdns, false);
1548		snd_soc_unregister_component(dev);
1549	}
1550	sdw_bus_master_delete(bus);
1551}
1552
1553int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
1554{
1555	struct device *dev = &auxdev->dev;
1556	struct sdw_intel *sdw;
1557	struct sdw_bus *bus;
1558	void __iomem *shim;
1559	u16 wake_sts;
1560
1561	sdw = dev_get_drvdata(dev);
1562	bus = &sdw->cdns.bus;
1563
1564	if (bus->prop.hw_disabled) {
1565		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n", bus->link_id);
1566		return 0;
1567	}
1568
1569	shim = sdw->link_res->shim;
1570	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
1571
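	/* nothing to do if this link's wake status bit is not set */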
1572	if (!(wake_sts & BIT(sdw->instance)))
1573		return 0;
1574
1575	/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
1576	intel_shim_wake(sdw, false);
1577
1578	/*
1579	 * resume the Master, which will generate a bus reset and result in
1580	 * Slaves re-attaching and being re-enumerated. The SoundWire physical
1581	 * device which generated the wake will trigger an interrupt, which
1582	 * will in turn cause the corresponding Linux Slave device to be
1583	 * resumed and the Slave codec driver to check the status.
1584	 */
1585	pm_request_resume(dev);
1586
1587	return 0;
1588}
1589
1590/*
1591 * PM calls
1592 */
1593
1594static int __maybe_unused intel_suspend(struct device *dev)
1595{
1596	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1597	struct sdw_intel *sdw = cdns_to_intel(cdns);
1598	struct sdw_bus *bus = &cdns->bus;
1599	u32 clock_stop_quirks;
1600	int ret;
1601
1602	if (bus->prop.hw_disabled) {
1603		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1604			bus->link_id);
1605		return 0;
1606	}
1607
1608	if (pm_runtime_suspended(dev)) {
1609		dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
1610
1611		clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1612
1613		if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
1614		     !clock_stop_quirks) &&
1615		    !pm_runtime_suspended(dev->parent)) {
1616
1617			/*
1618			 * if we've enabled clock stop, and the parent
1619			 * is still active, disable shim wake. The
1620			 * SHIM registers are not accessible if the
1621			 * parent is already pm_runtime suspended, so
1622			 * it's too late to change that configuration.
1623			 */
1624
1625			intel_shim_wake(sdw, false);
1626		}
1627
1628		return 0;
1629	}
1630
1631	ret = sdw_cdns_enable_interrupt(cdns, false);
1632	if (ret < 0) {
1633		dev_err(dev, "cannot disable interrupts on suspend\n");
1634		return ret;
1635	}
1636
1637	ret = intel_link_power_down(sdw);
1638	if (ret) {
1639		dev_err(dev, "Link power down failed: %d\n", ret);
1640		return ret;
1641	}
1642
1643	intel_shim_wake(sdw, false);
1644
1645	return 0;
1646}
1647
1648static int __maybe_unused intel_suspend_runtime(struct device *dev)
1649{
1650	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1651	struct sdw_intel *sdw = cdns_to_intel(cdns);
1652	struct sdw_bus *bus = &cdns->bus;
1653	u32 clock_stop_quirks;
1654	int ret;
1655
1656	if (bus->prop.hw_disabled) {
1657		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1658			bus->link_id);
1659		return 0;
1660	}
1661
1662	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1663
1664	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1665
1666		ret = sdw_cdns_enable_interrupt(cdns, false);
1667		if (ret < 0) {
1668			dev_err(dev, "cannot disable interrupts on suspend\n");
1669			return ret;
1670		}
1671
1672		ret = intel_link_power_down(sdw);
1673		if (ret) {
1674			dev_err(dev, "Link power down failed: %d\n", ret);
1675			return ret;
1676		}
1677
1678		intel_shim_wake(sdw, false);
1679
1680	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
1681		   !clock_stop_quirks) {
1682		bool wake_enable = true;
1683
1684		ret = sdw_cdns_clock_stop(cdns, true);
1685		if (ret < 0) {
1686			dev_err(dev, "cannot enable clock stop on suspend\n");
1687			wake_enable = false;
1688		}
1689
1690		ret = sdw_cdns_enable_interrupt(cdns, false);
1691		if (ret < 0) {
1692			dev_err(dev, "cannot disable interrupts on suspend\n");
1693			return ret;
1694		}
1695
1696		ret = intel_link_power_down(sdw);
1697		if (ret) {
1698			dev_err(dev, "Link power down failed: %d\n", ret);
1699			return ret;
1700		}
1701
1702		intel_shim_wake(sdw, wake_enable);
1703	} else {
1704		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1705			__func__, clock_stop_quirks);
1706		ret = -EINVAL;
1707	}
1708
1709	return ret;
1710}
1711
1712static int __maybe_unused intel_resume(struct device *dev)
1713{
1714	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1715	struct sdw_intel *sdw = cdns_to_intel(cdns);
1716	struct sdw_bus *bus = &cdns->bus;
1717	int link_flags;
1718	bool multi_link;
1719	int ret;
1720
1721	if (bus->prop.hw_disabled) {
1722		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1723			bus->link_id);
1724		return 0;
1725	}
1726
1727	link_flags = md_flags >> (bus->link_id * 8);
1728	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1729
1730	if (pm_runtime_suspended(dev)) {
1731		dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);
1732
1733		/* follow required sequence from runtime_pm.rst */
1734		pm_runtime_disable(dev);
1735		pm_runtime_set_active(dev);
1736		pm_runtime_mark_last_busy(dev);
1737		pm_runtime_enable(dev);
1738
1739		link_flags = md_flags >> (bus->link_id * 8);
1740
1741		if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1742			pm_runtime_idle(dev);
1743	}
1744
1745	ret = intel_init(sdw);
1746	if (ret) {
1747		dev_err(dev, "%s failed: %d\n", __func__, ret);
1748		return ret;
1749	}
1750
1751	/*
1752	 * make sure all Slaves are tagged as UNATTACHED and provide
1753	 * reason for reinitialization
1754	 */
1755	sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1756
1757	ret = sdw_cdns_enable_interrupt(cdns, true);
1758	if (ret < 0) {
1759		dev_err(dev, "cannot enable interrupts during resume\n");
1760		return ret;
1761	}
1762
1763	/*
1764	 * follow recommended programming flows to avoid timeouts when
1765	 * gsync is enabled
1766	 */
1767	if (multi_link)
1768		intel_shim_sync_arm(sdw);
1769
1770	ret = sdw_cdns_init(&sdw->cdns);
1771	if (ret < 0) {
1772		dev_err(dev, "unable to initialize Cadence IP during resume\n");
1773		return ret;
1774	}
1775
1776	ret = sdw_cdns_exit_reset(cdns);
1777	if (ret < 0) {
1778		dev_err(dev, "unable to exit bus reset sequence during resume\n");
1779		return ret;
1780	}
1781
1782	if (multi_link) {
1783		ret = intel_shim_sync_go(sdw);
1784		if (ret < 0) {
1785			dev_err(dev, "sync go failed during resume\n");
1786			return ret;
1787		}
1788	}
1789
1790	/*
1791	 * after system resume, the pm_runtime suspend() may kick in
1792	 * during the enumeration, before any child device forces the
1793	 * master device to remain active. Using pm_runtime_get()
1794	 * routines is not really possible, since it'd prevent the
1795	 * master from suspending.
1796	 * A reasonable compromise is to update the pm_runtime
1797	 * counters and delay the pm_runtime suspend by several
1798	 * seconds, by which time all enumeration should be complete.
1799	 */
1800	pm_runtime_mark_last_busy(dev);
1801
1802	return ret;
1803}
1804
1805static int __maybe_unused intel_resume_runtime(struct device *dev)
1806{
1807	struct sdw_cdns *cdns = dev_get_drvdata(dev);
1808	struct sdw_intel *sdw = cdns_to_intel(cdns);
1809	struct sdw_bus *bus = &cdns->bus;
1810	u32 clock_stop_quirks;
1811	bool clock_stop0;
1812	int link_flags;
1813	bool multi_link;
1814	int status;
1815	int ret;
1816
1817	if (bus->prop.hw_disabled) {
1818		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
1819			bus->link_id);
1820		return 0;
1821	}
1822
1823	link_flags = md_flags >> (bus->link_id * 8);
1824	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1825
1826	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1827
1828	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1829		ret = intel_init(sdw);
1830		if (ret) {
1831			dev_err(dev, "%s failed: %d\n", __func__, ret);
1832			return ret;
1833		}
1834
1835		/*
1836		 * make sure all Slaves are tagged as UNATTACHED and provide
1837		 * reason for reinitialization
1838		 */
1839		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1840
1841		ret = sdw_cdns_enable_interrupt(cdns, true);
1842		if (ret < 0) {
1843			dev_err(dev, "cannot enable interrupts during resume\n");
1844			return ret;
1845		}
1846
1847		/*
1848		 * follow recommended programming flows to avoid
1849		 * timeouts when gsync is enabled
1850		 */
1851		if (multi_link)
1852			intel_shim_sync_arm(sdw);
1853
1854		ret = sdw_cdns_init(&sdw->cdns);
1855		if (ret < 0) {
1856			dev_err(dev, "unable to initialize Cadence IP during resume\n");
1857			return ret;
1858		}
1859
1860		ret = sdw_cdns_exit_reset(cdns);
1861		if (ret < 0) {
1862			dev_err(dev, "unable to exit bus reset sequence during resume\n");
1863			return ret;
1864		}
1865
1866		if (multi_link) {
1867			ret = intel_shim_sync_go(sdw);
1868			if (ret < 0) {
1869				dev_err(dev, "sync go failed during resume\n");
1870				return ret;
1871			}
1872		}
1873	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
1874		ret = intel_init(sdw);
1875		if (ret) {
1876			dev_err(dev, "%s failed: %d\n", __func__, ret);
1877			return ret;
1878		}
1879
1880		/*
1881		 * An exception condition occurs for the CLK_STOP_BUS_RESET
1882		 * case if one or more masters remain active. In this condition,
1883		 * all the masters are powered on since they are in the same power
1884		 * domain. The Master can preserve its context in clock stop mode 0,
1885		 * so there is no need to clear the slave status and reset the bus.
1886		 */
1887		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1888
1889		if (!clock_stop0) {
1890
1891			/*
1892			 * make sure all Slaves are tagged as UNATTACHED and
1893			 * provide reason for reinitialization
1894			 */
1895
1896			status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1897			sdw_clear_slave_status(bus, status);
1898
1899			ret = sdw_cdns_enable_interrupt(cdns, true);
1900			if (ret < 0) {
1901				dev_err(dev, "cannot enable interrupts during resume\n");
1902				return ret;
1903			}
1904
1905			/*
1906			 * follow recommended programming flows to avoid
1907			 * timeouts when gsync is enabled
1908			 */
1909			if (multi_link)
1910				intel_shim_sync_arm(sdw);
1911
1912			/*
1913			 * Re-initialize the IP since it was powered-off
1914			 */
1915			sdw_cdns_init(&sdw->cdns);
1916
1917		} else {
1918			ret = sdw_cdns_enable_interrupt(cdns, true);
1919			if (ret < 0) {
1920				dev_err(dev, "cannot enable interrupts during resume\n");
1921				return ret;
1922			}
1923		}
1924
1925		ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1926		if (ret < 0) {
1927			dev_err(dev, "unable to restart clock during resume\n");
1928			return ret;
1929		}
1930
1931		if (!clock_stop0) {
1932			ret = sdw_cdns_exit_reset(cdns);
1933			if (ret < 0) {
1934				dev_err(dev, "unable to exit bus reset sequence during resume\n");
1935				return ret;
1936			}
1937
1938			if (multi_link) {
1939				ret = intel_shim_sync_go(sdw);
1940				if (ret < 0) {
1941					dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1942					return ret;
1943				}
1944			}
1945		}
1946	} else if (!clock_stop_quirks) {
1947
1948		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1949		if (!clock_stop0)
1950			dev_err(dev, "%s invalid configuration, clock was not stopped\n", __func__);
1951
1952		ret = intel_init(sdw);
1953		if (ret) {
1954			dev_err(dev, "%s failed: %d\n", __func__, ret);
1955			return ret;
1956		}
1957
1958		ret = sdw_cdns_enable_interrupt(cdns, true);
1959		if (ret < 0) {
1960			dev_err(dev, "cannot enable interrupts during resume\n");
1961			return ret;
1962		}
1963
1964		ret = sdw_cdns_clock_restart(cdns, false);
1965		if (ret < 0) {
1966			dev_err(dev, "unable to resume master during resume\n");
1967			return ret;
1968		}
1969	} else {
1970		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1971			__func__, clock_stop_quirks);
1972		ret = -EINVAL;
1973	}
1974
1975	return ret;
1976}
1977
1978static const struct dev_pm_ops intel_pm = {
1979	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
1980	SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
1981};
1982
1983static const struct auxiliary_device_id intel_link_id_table[] = {
1984	{ .name = "soundwire_intel.link" },
1985	{},
1986};
1987MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table);
1988
1989static struct auxiliary_driver sdw_intel_drv = {
1990	.probe = intel_link_probe,
1991	.remove = intel_link_remove,
1992	.driver = {
1993		/* auxiliary_driver_register() sets .name to be the modname */
1994		.pm = &intel_pm,
1995	},
1996	.id_table = intel_link_id_table
1997};
1998module_auxiliary_driver(sdw_intel_drv);
1999
2000MODULE_LICENSE("Dual BSD/GPL");
2001MODULE_DESCRIPTION("Intel SoundWire Link Driver");