v6.13.7
   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2// Copyright(c) 2015-17 Intel Corporation.
   3
   4/*
   5 * Soundwire Intel Master Driver
   6 */
   7
   8#include <linux/acpi.h>
   9#include <linux/cleanup.h>
  10#include <linux/debugfs.h>
  11#include <linux/delay.h>
  12#include <linux/io.h>
  13#include <sound/pcm_params.h>
  14#include <linux/pm_runtime.h>
  15#include <sound/soc.h>
  16#include <linux/soundwire/sdw_registers.h>
  17#include <linux/soundwire/sdw.h>
  18#include <linux/soundwire/sdw_intel.h>
  19#include "cadence_master.h"
  20#include "bus.h"
  21#include "intel.h"
  22
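/*
 * Polling helpers used throughout this file: intel_wait_bit() re-reads a
 * 32-bit register until (value & mask) == target, retrying up to 10 times
 * with a 50-100us sleep between reads (roughly 1ms worst case) and
 * returning -EAGAIN on timeout. intel_set_bit()/intel_clear_bit() write a
 * value first and then wait for the masked bits to assert/clear.
 */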
  23static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
  24{
  25	int timeout = 10;
  26	u32 reg_read;
  27
  28	do {
  29		reg_read = readl(base + offset);
  30		if ((reg_read & mask) == target)
  31			return 0;
  32
  33		timeout--;
  34		usleep_range(50, 100);
  35	} while (timeout != 0);
  36
  37	return -EAGAIN;
  38}
  39
  40static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
  41{
  42	writel(value, base + offset);
  43	return intel_wait_bit(base, offset, mask, 0);
  44}
  45
  46static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
  47{
  48	writel(value, base + offset);
  49	return intel_wait_bit(base, offset, mask, mask);
  50}
  51
  52/*
  53 * debugfs
  54 */
  55#ifdef CONFIG_DEBUG_FS
  56
  57#define RD_BUF (2 * PAGE_SIZE)
  58
  59static ssize_t intel_sprintf(void __iomem *mem, bool l,
  60			     char *buf, size_t pos, unsigned int reg)
  61{
  62	int value;
  63
  64	if (l)
  65		value = intel_readl(mem, reg);
  66	else
  67		value = intel_readw(mem, reg);
  68
  69	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
  70}
  71
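/*
 * intel_reg_show() dumps the SHIM link capabilities, the per-link control
 * and PCMSyCH registers, the wake registers and the ALH stream configuration
 * into a 2-page scratch buffer and emits it through seq_file. The buffer is
 * released automatically thanks to the __free(kfree) cleanup annotation.
 */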
  72static int intel_reg_show(struct seq_file *s_file, void *data)
  73{
  74	struct sdw_intel *sdw = s_file->private;
  75	void __iomem *s = sdw->link_res->shim;
  76	void __iomem *a = sdw->link_res->alh;
  77	ssize_t ret;
  78	int i, j;
  79	unsigned int links, reg;
  80
  81	char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL);
  82	if (!buf)
  83		return -ENOMEM;
  84
  85	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
  86
  87	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
  88	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
  89
  90	for (i = 0; i < links; i++) {
  91		reg = SDW_SHIM_LCAP + i * 4;
  92		ret += intel_sprintf(s, true, buf, ret, reg);
  93	}
  94
  95	for (i = 0; i < links; i++) {
  96		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
  97		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
  98		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
  99		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
 100		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
 101		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
 102		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
 103
 104		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
 105
 106		/*
 107		 * the value 10 is the number of PDIs. We will need a
 108		 * cleanup to remove hard-coded Intel configurations
 109		 * from cadence_master.c
 110		 */
 111		for (j = 0; j < 10; j++) {
 112			ret += intel_sprintf(s, false, buf, ret,
 113					SDW_SHIM_PCMSYCHM(i, j));
 114			ret += intel_sprintf(s, false, buf, ret,
 115					SDW_SHIM_PCMSYCHC(i, j));
 116		}
 117		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
 118
 119		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
 120		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
 121	}
 122
 123	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
 124	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
 125	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
 126
 127	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
 128	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
 129		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
 130
 131	seq_printf(s_file, "%s", buf);
 132
 133	return 0;
 134}
 135DEFINE_SHOW_ATTRIBUTE(intel_reg);
 136
 137static int intel_set_m_datamode(void *data, u64 value)
 138{
 139	struct sdw_intel *sdw = data;
 140	struct sdw_bus *bus = &sdw->cdns.bus;
 141
 142	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 143		return -EINVAL;
 144
 145	/* Userspace changed the hardware state behind the kernel's back */
 146	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 147
 148	bus->params.m_data_mode = value;
 149
 150	return 0;
 151}
 152DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
 153			 intel_set_m_datamode, "%llu\n");
 154
 155static int intel_set_s_datamode(void *data, u64 value)
 156{
 157	struct sdw_intel *sdw = data;
 158	struct sdw_bus *bus = &sdw->cdns.bus;
 159
 160	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 161		return -EINVAL;
 162
 163	/* Userspace changed the hardware state behind the kernel's back */
 164	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 165
 166	bus->params.s_data_mode = value;
 167
 168	return 0;
 169}
 170DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
 171			 intel_set_s_datamode, "%llu\n");
 172
 173static void intel_debugfs_init(struct sdw_intel *sdw)
 174{
 175	struct dentry *root = sdw->cdns.bus.debugfs;
 176
 177	if (!root)
 178		return;
 179
 180	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
 181
 182	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
 183			    &intel_reg_fops);
 184
 185	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
 186			    &intel_set_m_datamode_fops);
 187
 188	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
 189			    &intel_set_s_datamode_fops);
 190
 191	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
 192}
 193
 194static void intel_debugfs_exit(struct sdw_intel *sdw)
 195{
 196	debugfs_remove_recursive(sdw->debugfs);
 197}
 198#else
 199static void intel_debugfs_init(struct sdw_intel *sdw) {}
 200static void intel_debugfs_exit(struct sdw_intel *sdw) {}
 201#endif /* CONFIG_DEBUG_FS */
 202
 203/*
 204 * shim ops
 205 */
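/*
 * The two helpers below hand ownership of the link I/Os back and forth
 * between the integration ("glue") logic and the Cadence master IP by
 * toggling the per-link SHIM IOCTL bits, with a short delay after each
 * register write.
 */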
 206/* this needs to be called with shim_lock */
 207static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
 208{
 209	void __iomem *shim = sdw->link_res->shim;
 210	unsigned int link_id = sdw->instance;
 211	u16 ioctl;
 212
 213	/* Switch to MIP from Glue logic */
 214	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
 215
 216	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
 217	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 218	usleep_range(10, 15);
 219
 220	ioctl &= ~(SDW_SHIM_IOCTL_DO);
 221	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 222	usleep_range(10, 15);
 223
 224	ioctl |= (SDW_SHIM_IOCTL_MIF);
 225	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 226	usleep_range(10, 15);
 227
 228	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
 229	ioctl &= ~(SDW_SHIM_IOCTL_COE);
 230	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 231	usleep_range(10, 15);
 232
 233	/* at this point Master IP has full control of the I/Os */
 234}
 235
 236/* this needs to be called with shim_lock */
 237static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
 238{
 239	unsigned int link_id = sdw->instance;
 240	void __iomem *shim = sdw->link_res->shim;
 241	u16 ioctl;
 242
 243	/* Glue logic */
 244	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
 245	ioctl |= SDW_SHIM_IOCTL_BKE;
 246	ioctl |= SDW_SHIM_IOCTL_COE;
 247	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 248	usleep_range(10, 15);
 249
 250	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
 251	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 252	usleep_range(10, 15);
 253
 254	/* at this point Integration Glue has full control of the I/Os */
 255}
 256
 257/* this needs to be called with shim_lock */
 258static void intel_shim_init(struct sdw_intel *sdw)
 259{
 260	void __iomem *shim = sdw->link_res->shim;
 261	unsigned int link_id = sdw->instance;
 262	u16 ioctl = 0, act;
 263
 264	/* Initialize Shim */
 265	ioctl |= SDW_SHIM_IOCTL_BKE;
 266	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 267	usleep_range(10, 15);
 268
 269	ioctl |= SDW_SHIM_IOCTL_WPDD;
 270	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 271	usleep_range(10, 15);
 272
 273	ioctl |= SDW_SHIM_IOCTL_DO;
 274	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 275	usleep_range(10, 15);
 276
 277	ioctl |= SDW_SHIM_IOCTL_DOE;
 278	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 279	usleep_range(10, 15);
 280
 281	intel_shim_glue_to_master_ip(sdw);
 282
 283	act = intel_readw(shim, SDW_SHIM_CTMCTL(link_id));
 284	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
 285	act |= SDW_SHIM_CTMCTL_DACTQE;
 286	act |= SDW_SHIM_CTMCTL_DODS;
 287	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
 288	usleep_range(10, 15);
 289}
 290
 291static int intel_shim_check_wake(struct sdw_intel *sdw)
 292{
 293	void __iomem *shim;
 294	u16 wake_sts;
 295
 296	shim = sdw->link_res->shim;
 297	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 298
 299	return wake_sts & BIT(sdw->instance);
 300}
 301
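/*
 * intel_shim_wake() enables or disables the wake interrupt for this link:
 * enabling sets the link's bit in SDW_SHIM_WAKEEN; disabling clears that
 * bit and then writes the link's SDW_SHIM_WAKESTS bit back to clear any
 * pending wake status.
 */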
 302static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
 303{
 304	void __iomem *shim = sdw->link_res->shim;
 305	unsigned int link_id = sdw->instance;
 306	u16 wake_en, wake_sts;
 307
 308	mutex_lock(sdw->link_res->shim_lock);
 309	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
 310
 311	if (wake_enable) {
 312		/* Enable the wakeup */
 313		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 314		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 315	} else {
 316		/* Disable the wake up interrupt */
 317		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
 318		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 319
 320		/* Clear wake status */
 321		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 322		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
 323		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
 324	}
 325	mutex_unlock(sdw->link_res->shim_lock);
 326}
 327
 328static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
 329{
 330	void __iomem *shim = sdw->link_res->shim;
 331	int sync_reg;
 332
 333	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 334	return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
 335}
 336
 337static int intel_link_power_up(struct sdw_intel *sdw)
 338{
 339	unsigned int link_id = sdw->instance;
 340	void __iomem *shim = sdw->link_res->shim;
 341	u32 *shim_mask = sdw->link_res->shim_mask;
 342	struct sdw_bus *bus = &sdw->cdns.bus;
 343	struct sdw_master_prop *prop = &bus->prop;
 344	u32 spa_mask, cpa_mask;
 345	u32 link_control;
 346	int ret = 0;
 347	u32 clock_source;
 348	u32 syncprd;
 349	u32 sync_reg;
 350	bool lcap_mlcs;
 351
 352	mutex_lock(sdw->link_res->shim_lock);
 353
 354	/*
 355	 * The hardware relies on an internal counter, typically 4kHz,
 356	 * to generate the SoundWire SSP - which defines a 'safe'
 357	 * synchronization point between commands and audio transport
 358	 * and allows for multi link synchronization. The SYNCPRD value
 359	 * is only dependent on the oscillator clock provided to
 360	 * the IP, so adjust based on _DSD properties reported in DSDT
 361	 * tables. The values reported are based on either 24MHz
 362	 * (CNL/CML) or 38.4 MHz (ICL/TGL+). On MeteorLake additional
 363	 * frequencies are available with the MLCS clock source selection.
 364	 */
 365	lcap_mlcs = intel_readl(shim, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_MLCS_MASK;
 366
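	/*
	 * Summary of the clock decode below: an mclk that is a multiple of
	 * 6 MHz (e.g. 24 MHz) uses the 24 MHz XTAL SYNCPRD, or the 96 MHz
	 * value with the audio PLL clock source when MLCS is supported; a
	 * multiple of 2.4 MHz but not of 6 MHz (e.g. 38.4 MHz) uses the
	 * 38.4 MHz XTAL value; anything else (e.g. 24.576 MHz) is only valid
	 * when MLCS is supported, with the cardinal clock source, and is
	 * otherwise rejected with -EINVAL.
	 */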
 367	if (prop->mclk_freq % 6000000) {
 368		if (prop->mclk_freq % 2400000) {
 369			if (lcap_mlcs) {
 370				syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
 371				clock_source = SDW_SHIM_MLCS_CARDINAL_CLK;
 372			} else {
 373				dev_err(sdw->cdns.dev, "%s: invalid clock configuration, mclk %d lcap_mlcs %d\n",
 374					__func__, prop->mclk_freq, lcap_mlcs);
 375				ret = -EINVAL;
 376				goto out;
 377			}
 378		} else {
 379			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
 380			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
 381		}
 382	} else {
 383		if (lcap_mlcs) {
 384			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
 385			clock_source = SDW_SHIM_MLCS_AUDIO_PLL_CLK;
 386		} else {
 387			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
 388			clock_source = SDW_SHIM_MLCS_XTAL_CLK;
 389		}
 390	}
 391
 392	if (!*shim_mask) {
 393		dev_dbg(sdw->cdns.dev, "powering up all links\n");
 394
 395		/* we first need to program the SyncPRD/CPU registers */
 396		dev_dbg(sdw->cdns.dev,
 397			"first link up, programming SYNCPRD\n");
 398
 399		/* set SyncPRD period */
 400		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 401		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
 402
 403		/* Set SyncCPU bit */
 404		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
 405		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 406
 407		/* Link power up sequence */
 408		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 409
 410		/* only power-up enabled links */
 411		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
 412		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 413
 414		link_control |=  spa_mask;
 415
 416		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 417		if (ret < 0) {
 418			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
 419			goto out;
 420		}
 421
 422		/* SyncCPU will change once link is active */
 423		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
 424				     SDW_SHIM_SYNC_SYNCCPU, 0);
 425		if (ret < 0) {
 426			dev_err(sdw->cdns.dev,
 427				"Failed to set SHIM_SYNC: %d\n", ret);
 428			goto out;
 429		}
 430
 431		/* update link clock if needed */
 432		if (lcap_mlcs) {
 433			link_control = intel_readl(shim, SDW_SHIM_LCTL);
 434			u32p_replace_bits(&link_control, clock_source, SDW_SHIM_LCTL_MLCS_MASK);
 435			intel_writel(shim, SDW_SHIM_LCTL, link_control);
 436		}
 437	}
 438
 439	*shim_mask |= BIT(link_id);
 440
 441	sdw->cdns.link_up = true;
 442
 443	intel_shim_init(sdw);
 444
 445out:
 446	mutex_unlock(sdw->link_res->shim_lock);
 447
 448	return ret;
 449}
 450
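/*
 * shim_mask is shared across links and acts as a reference count: the
 * global power-up sequence in intel_link_power_up() only runs when the
 * first link comes up, and intel_link_power_down() only powers the links
 * down again once the last bit has been cleared.
 */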
 451static int intel_link_power_down(struct sdw_intel *sdw)
 452{
 453	u32 link_control, spa_mask, cpa_mask;
 454	unsigned int link_id = sdw->instance;
 455	void __iomem *shim = sdw->link_res->shim;
 456	u32 *shim_mask = sdw->link_res->shim_mask;
 457	int ret = 0;
 458
 459	mutex_lock(sdw->link_res->shim_lock);
 460
 461	if (!(*shim_mask & BIT(link_id)))
 462		dev_err(sdw->cdns.dev,
 463			"%s: Unbalanced power-up/down calls\n", __func__);
 464
 465	sdw->cdns.link_up = false;
 466
 467	intel_shim_master_ip_to_glue(sdw);
 468
 469	*shim_mask &= ~BIT(link_id);
 470
 471	if (!*shim_mask) {
 472
 473		dev_dbg(sdw->cdns.dev, "powering down all links\n");
 474
 475		/* Link power down sequence */
 476		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 477
 478		/* only power-down enabled links */
 479		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
 480		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 481
 482		link_control &=  spa_mask;
 483
 484		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 485		if (ret < 0) {
 486			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
 487
 488			/*
 489			 * we leave the sdw->cdns.link_up flag as false since we've disabled
 490			 * the link at this point and cannot handle interrupts any longer.
 491			 */
 492		}
 493	}
 494
 495	mutex_unlock(sdw->link_res->shim_lock);
 496
 497	return ret;
 498}
 499
 500static void intel_shim_sync_arm(struct sdw_intel *sdw)
 501{
 502	void __iomem *shim = sdw->link_res->shim;
 503	u32 sync_reg;
 504
 505	mutex_lock(sdw->link_res->shim_lock);
 506
 507	/* update SYNC register */
 508	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 509	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
 510	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 511
 512	mutex_unlock(sdw->link_res->shim_lock);
 513}
 514
 515static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
 516{
 517	void __iomem *shim = sdw->link_res->shim;
 518	u32 sync_reg;
 519
 520	/* Read SYNC register */
 521	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 522
 523	/*
 524	 * Set SyncGO bit to synchronously trigger a bank switch for
 525	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
 526	 * the Masters.
 527	 */
 528	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
 529
 530	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 531
 532	return 0;
 533}
 534
 535static int intel_shim_sync_go(struct sdw_intel *sdw)
 536{
 537	int ret;
 538
 539	mutex_lock(sdw->link_res->shim_lock);
 540
 541	ret = intel_shim_sync_go_unlocked(sdw);
 542
 543	mutex_unlock(sdw->link_res->shim_lock);
 544
 545	return ret;
 546}
 547
 548/*
 549 * PDI routines
 550 */
 551static void intel_pdi_init(struct sdw_intel *sdw,
 552			   struct sdw_cdns_stream_config *config)
 553{
 554	void __iomem *shim = sdw->link_res->shim;
 555	unsigned int link_id = sdw->instance;
 556	int pcm_cap;
 557
 558	/* PCM Stream Capability */
 559	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
 560
 561	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
 562	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
 563	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
 564
 565	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
 566		config->pcm_bd, config->pcm_in, config->pcm_out);
 567}
 568
 569static int
 570intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
 571{
 572	void __iomem *shim = sdw->link_res->shim;
 573	unsigned int link_id = sdw->instance;
 574	int count;
 575
 576	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
 577
 578	/*
 579	 * WORKAROUND: on all existing Intel controllers, pdi
 580	 * number 2 reports channel count as 1 even though it
  581	 * supports 8 channels. Hard-code the channel count
  582	 * for PDI number 2.
 583	 */
 584	if (pdi_num == 2)
 585		count = 7;
 586
 587	/* zero based values for channel count in register */
 588	count++;
 589
 590	return count;
 591}
 592
 593static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
 594				   struct sdw_cdns_pdi *pdi,
 595				   unsigned int num_pdi,
 596				   unsigned int *num_ch)
 597{
 598	int i, ch_count = 0;
 599
 600	for (i = 0; i < num_pdi; i++) {
 601		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
 602		ch_count += pdi->ch_count;
 603		pdi++;
 604	}
 605
 606	*num_ch = ch_count;
 607	return 0;
 608}
 609
 610static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
 611				      struct sdw_cdns_streams *stream)
 612{
 613	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
 614				&stream->num_ch_bd);
 615
 616	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
 617				&stream->num_ch_in);
 618
 619	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
 620				&stream->num_ch_out);
 621
 622	return 0;
 623}
 624
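/*
 * ALH stream ID mapping used by both helpers below: each link owns a block
 * of 16 ALH stream IDs, PCM PDIs start at offset 3 within that block, and
 * PDIs numbered 2 and above skip two more IDs because the Bulk and PCM
 * streams are not contiguous.
 */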
 625static void
 626intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 627{
 628	void __iomem *shim = sdw->link_res->shim;
 629	unsigned int link_id = sdw->instance;
 630	int pdi_conf = 0;
 631
 632	/* the Bulk and PCM streams are not contiguous */
 633	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 634	if (pdi->num >= 2)
 635		pdi->intel_alh_id += 2;
 636
 637	/*
 638	 * Program stream parameters to stream SHIM register
 639	 * This is applicable for PCM stream only.
 640	 */
 641	if (pdi->type != SDW_STREAM_PCM)
 642		return;
 643
 644	if (pdi->dir == SDW_DATA_DIR_RX)
 645		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
 646	else
 647		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
 648
 649	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
 650	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
 651	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
 652
 653	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
 654}
 655
 656static void
 657intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 658{
 659	void __iomem *alh = sdw->link_res->alh;
 660	unsigned int link_id = sdw->instance;
 661	unsigned int conf;
 662
 663	/* the Bulk and PCM streams are not contiguous */
 664	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 665	if (pdi->num >= 2)
 666		pdi->intel_alh_id += 2;
 667
 668	/* Program Stream config ALH register */
 669	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
 670
 671	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
 672	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
 673
 674	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
 675}
 676
 677static int intel_params_stream(struct sdw_intel *sdw,
 678			       struct snd_pcm_substream *substream,
 679			       struct snd_soc_dai *dai,
 680			       struct snd_pcm_hw_params *hw_params,
 681			       int link_id, int alh_stream_id)
 682{
 683	struct sdw_intel_link_res *res = sdw->link_res;
 684	struct sdw_intel_stream_params_data params_data;
 685
 686	params_data.substream = substream;
 687	params_data.dai = dai;
 688	params_data.hw_params = hw_params;
 689	params_data.link_id = link_id;
 690	params_data.alh_stream_id = alh_stream_id;
 691
 692	if (res->ops && res->ops->params_stream && res->dev)
 693		return res->ops->params_stream(res->dev,
 694					       &params_data);
 695	return -EIO;
 696}
 697
 698/*
 699 * DAI routines
 700 */
 701
 702static int intel_free_stream(struct sdw_intel *sdw,
 703			     struct snd_pcm_substream *substream,
 704			     struct snd_soc_dai *dai,
 705			     int link_id)
 706{
 707	struct sdw_intel_link_res *res = sdw->link_res;
 708	struct sdw_intel_stream_free_data free_data;
 709
 710	free_data.substream = substream;
 711	free_data.dai = dai;
 712	free_data.link_id = link_id;
 713
 714	if (res->ops && res->ops->free_stream && res->dev)
 715		return res->ops->free_stream(res->dev, &free_data);
 716
 717	return 0;
 718}
 719
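/*
 * intel_hw_params() allocates a PDI for this DAI, programs the SHIM, ALH
 * and Cadence PDI registers, informs the DSP of the ALH stream ID through
 * the params_stream() link op, and finally adds this master to the stream
 * with a single port configuration covering all requested channels.
 */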
 720static int intel_hw_params(struct snd_pcm_substream *substream,
 721			   struct snd_pcm_hw_params *params,
 722			   struct snd_soc_dai *dai)
 723{
 724	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 725	struct sdw_intel *sdw = cdns_to_intel(cdns);
 726	struct sdw_cdns_dai_runtime *dai_runtime;
 727	struct sdw_cdns_pdi *pdi;
 728	struct sdw_stream_config sconfig;
 729	int ch, dir;
 730	int ret;
 731
 732	dai_runtime = cdns->dai_runtime_array[dai->id];
 733	if (!dai_runtime)
 734		return -EIO;
 735
 736	ch = params_channels(params);
 737	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 738		dir = SDW_DATA_DIR_RX;
 739	else
 740		dir = SDW_DATA_DIR_TX;
 741
 742	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
 743
 744	if (!pdi)
 745		return -EINVAL;
 746
 747	/* do run-time configurations for SHIM, ALH and PDI/PORT */
 748	intel_pdi_shim_configure(sdw, pdi);
 749	intel_pdi_alh_configure(sdw, pdi);
 750	sdw_cdns_config_stream(cdns, ch, dir, pdi);
 751
 752	/* store pdi and hw_params, may be needed in prepare step */
 753	dai_runtime->paused = false;
 754	dai_runtime->suspended = false;
 755	dai_runtime->pdi = pdi;
 756
 757	/* Inform DSP about PDI stream number */
 758	ret = intel_params_stream(sdw, substream, dai, params,
 759				  sdw->instance,
 760				  pdi->intel_alh_id);
 761	if (ret)
 762		return ret;
 763
 764	sconfig.direction = dir;
 765	sconfig.ch_count = ch;
 766	sconfig.frame_rate = params_rate(params);
 767	sconfig.type = dai_runtime->stream_type;
 768
 769	sconfig.bps = snd_pcm_format_width(params_format(params));
 770
 771	/* Port configuration */
 772	struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
 773								GFP_KERNEL);
 774	if (!pconfig)
 775		return -ENOMEM;
 776
 777	pconfig->num = pdi->num;
 778	pconfig->ch_mask = (1 << ch) - 1;
 779
 780	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
 781				    pconfig, 1, dai_runtime->stream);
 782	if (ret)
 783		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
 784
 785	return ret;
 786}
 787
 788static int intel_prepare(struct snd_pcm_substream *substream,
 789			 struct snd_soc_dai *dai)
 790{
 791	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 792	struct sdw_intel *sdw = cdns_to_intel(cdns);
 793	struct sdw_cdns_dai_runtime *dai_runtime;
 794	int ch, dir;
 795	int ret = 0;
 796
 797	dai_runtime = cdns->dai_runtime_array[dai->id];
 798	if (!dai_runtime) {
 799		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 800			__func__);
 801		return -EIO;
 802	}
 803
 804	if (dai_runtime->suspended) {
 805		struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
 806		struct snd_pcm_hw_params *hw_params;
 807
 808		hw_params = &rtd->dpcm[substream->stream].hw_params;
 809
 810		dai_runtime->suspended = false;
 811
 812		/*
 813		 * .prepare() is called after system resume, where we
 814		 * need to reinitialize the SHIM/ALH/Cadence IP.
 815		 * .prepare() is also called to deal with underflows,
 816		 * but in those cases we cannot touch ALH/SHIM
 817		 * registers
 818		 */
 819
 820		/* configure stream */
 821		ch = params_channels(hw_params);
 822		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 823			dir = SDW_DATA_DIR_RX;
 824		else
 825			dir = SDW_DATA_DIR_TX;
 826
 827		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
 828		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
 829		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
 830
 831		/* Inform DSP about PDI stream number */
 832		ret = intel_params_stream(sdw, substream, dai,
 833					  hw_params,
 834					  sdw->instance,
 835					  dai_runtime->pdi->intel_alh_id);
 836	}
 837
 838	return ret;
 839}
 840
 841static int
 842intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
 843{
 844	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 845	struct sdw_intel *sdw = cdns_to_intel(cdns);
 846	struct sdw_cdns_dai_runtime *dai_runtime;
 847	int ret;
 848
 849	dai_runtime = cdns->dai_runtime_array[dai->id];
 850	if (!dai_runtime)
 851		return -EIO;
 852
 853	/*
 854	 * The sdw stream state will transition to RELEASED when stream->
 855	 * master_list is empty. So the stream state will transition to
 856	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
 857	 * cpu-dai.
 858	 */
 859	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
 860	if (ret < 0) {
 861		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
 862			dai_runtime->stream->name, ret);
 863		return ret;
 864	}
 865
 866	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
 867	if (ret < 0) {
 868		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
 869		return ret;
 870	}
 871
 872	dai_runtime->pdi = NULL;
 873
 874	return 0;
 875}
 876
 877static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
 878				    void *stream, int direction)
 879{
 880	return cdns_set_sdw_stream(dai, stream, direction);
 881}
 882
 883static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
 884				  int direction)
 885{
 886	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 887	struct sdw_cdns_dai_runtime *dai_runtime;
 888
 889	dai_runtime = cdns->dai_runtime_array[dai->id];
 890	if (!dai_runtime)
 891		return ERR_PTR(-EINVAL);
 892
 893	return dai_runtime->stream;
 894}
 895
 896static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
 897{
 898	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 899	struct sdw_cdns_dai_runtime *dai_runtime;
 900	int ret = 0;
 901
 902	dai_runtime = cdns->dai_runtime_array[dai->id];
 903	if (!dai_runtime) {
 904		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 905			__func__);
 906		return -EIO;
 907	}
 908
 909	switch (cmd) {
 910	case SNDRV_PCM_TRIGGER_SUSPEND:
 911
 912		/*
 913		 * The .prepare callback is used to deal with xruns and resume operations.
 914		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
 915		 * but for resume operations the DMAs and SHIM registers need to be initialized.
  916		 * The .trigger callback is used to track the suspend case only.
 917		 */
 918
 919		dai_runtime->suspended = true;
 920
 921		break;
 922
 923	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 924		dai_runtime->paused = true;
 925		break;
 926	case SNDRV_PCM_TRIGGER_STOP:
 927	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 928		dai_runtime->paused = false;
 929		break;
 930	default:
 931		break;
 932	}
 933
 934	return ret;
 935}
 936
 937static int intel_component_probe(struct snd_soc_component *component)
 938{
 939	int ret;
 940
 941	/*
 942	 * make sure the device is pm_runtime_active before initiating
 943	 * bus transactions during the card registration.
 944	 * We use pm_runtime_resume() here, without taking a reference
 945	 * and releasing it immediately.
 946	 */
 947	ret = pm_runtime_resume(component->dev);
 948	if (ret < 0 && ret != -EACCES)
 949		return ret;
 950
 951	return 0;
 952}
 953
 954static int intel_component_dais_suspend(struct snd_soc_component *component)
 955{
 956	struct snd_soc_dai *dai;
 957
 958	/*
 959	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
 960	 * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
 961	 * Since the component suspend is called last, we can trap this corner case
 962	 * and force the DAIs to release their resources.
 963	 */
 964	for_each_component_dais(component, dai) {
 965		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 966		struct sdw_cdns_dai_runtime *dai_runtime;
 967
 968		dai_runtime = cdns->dai_runtime_array[dai->id];
 969
 970		if (!dai_runtime)
 971			continue;
 972
 973		if (dai_runtime->suspended)
 974			continue;
 975
 976		if (dai_runtime->paused)
 977			dai_runtime->suspended = true;
 978	}
 979
 980	return 0;
 981}
 982
 983static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
 984	.hw_params = intel_hw_params,
 985	.prepare = intel_prepare,
 986	.hw_free = intel_hw_free,
 987	.trigger = intel_trigger,
 988	.set_stream = intel_pcm_set_sdw_stream,
 989	.get_stream = intel_get_sdw_stream,
 990};
 991
 992static const struct snd_soc_component_driver dai_component = {
 993	.name			= "soundwire",
 994	.probe			= intel_component_probe,
 995	.suspend		= intel_component_dais_suspend,
 996	.legacy_dai_naming	= 1,
 997};
 998
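/*
 * DAI creation: one DAI named "SDW<instance> Pin<i>" is registered per PCM
 * PDI, in three consecutive groups (in, out, bidirectional). Input PDIs only
 * expose capture, output PDIs only playback, bidirectional PDIs expose both.
 */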
 999static int intel_create_dai(struct sdw_cdns *cdns,
1000			    struct snd_soc_dai_driver *dais,
1001			    enum intel_pdi_type type,
1002			    u32 num, u32 off, u32 max_ch)
1003{
1004	int i;
1005
1006	if (num == 0)
1007		return 0;
1008
1009	for (i = off; i < (off + num); i++) {
1010		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1011					      "SDW%d Pin%d",
1012					      cdns->instance, i);
1013		if (!dais[i].name)
1014			return -ENOMEM;
1015
1016		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1017			dais[i].playback.channels_min = 1;
1018			dais[i].playback.channels_max = max_ch;
1019		}
1020
1021		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1022			dais[i].capture.channels_min = 1;
1023			dais[i].capture.channels_max = max_ch;
1024		}
1025
1026		dais[i].ops = &intel_pcm_dai_ops;
1027	}
1028
1029	return 0;
1030}
1031
1032static int intel_register_dai(struct sdw_intel *sdw)
1033{
1034	struct sdw_cdns_dai_runtime **dai_runtime_array;
1035	struct sdw_cdns_stream_config config;
1036	struct sdw_cdns *cdns = &sdw->cdns;
1037	struct sdw_cdns_streams *stream;
1038	struct snd_soc_dai_driver *dais;
1039	int num_dai, ret, off = 0;
1040
1041	/* Read the PDI config and initialize cadence PDI */
1042	intel_pdi_init(sdw, &config);
1043	ret = sdw_cdns_pdi_init(cdns, config);
1044	if (ret)
1045		return ret;
1046
1047	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
1048
1049	/* DAIs are created based on total number of PDIs supported */
1050	num_dai = cdns->pcm.num_pdi;
1051
1052	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1053					 sizeof(struct sdw_cdns_dai_runtime *),
1054					 GFP_KERNEL);
1055	if (!dai_runtime_array)
1056		return -ENOMEM;
1057	cdns->dai_runtime_array = dai_runtime_array;
1058
1059	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1060	if (!dais)
1061		return -ENOMEM;
1062
1063	/* Create PCM DAIs */
1064	stream = &cdns->pcm;
1065
1066	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1067			       off, stream->num_ch_in);
1068	if (ret)
1069		return ret;
1070
1071	off += cdns->pcm.num_in;
1072	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1073			       off, stream->num_ch_out);
1074	if (ret)
1075		return ret;
1076
1077	off += cdns->pcm.num_out;
1078	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1079			       off, stream->num_ch_bd);
1080	if (ret)
1081		return ret;
1082
1083	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1084					       dais, num_dai);
1085}
1086
1087
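/*
 * Hardware ops table exported for the rest of the Intel SoundWire support
 * code. The bus start/stop, clock-stop and bank-switch callbacks referenced
 * here are not defined in this file; they are provided elsewhere in the
 * driver.
 */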
1088const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1089	.debugfs_init = intel_debugfs_init,
1090	.debugfs_exit = intel_debugfs_exit,
1091
1092	.register_dai = intel_register_dai,
1093
1094	.check_clock_stop = intel_check_clock_stop,
1095	.start_bus = intel_start_bus,
1096	.start_bus_after_reset = intel_start_bus_after_reset,
1097	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1098	.stop_bus = intel_stop_bus,
1099
1100	.link_power_up = intel_link_power_up,
1101	.link_power_down = intel_link_power_down,
1102
1103	.shim_check_wake = intel_shim_check_wake,
1104	.shim_wake = intel_shim_wake,
1105
1106	.pre_bank_switch = intel_pre_bank_switch,
1107	.post_bank_switch = intel_post_bank_switch,
1108
1109	.sync_arm = intel_shim_sync_arm,
1110	.sync_go_unlocked = intel_shim_sync_go_unlocked,
1111	.sync_go = intel_shim_sync_go,
1112	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
1113};
1114EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, "SOUNDWIRE_INTEL");
 
v6.2
   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2// Copyright(c) 2015-17 Intel Corporation.
   3
   4/*
   5 * Soundwire Intel Master Driver
   6 */
   7
   8#include <linux/acpi.h>
 
   9#include <linux/debugfs.h>
  10#include <linux/delay.h>
  11#include <linux/io.h>
  12#include <sound/pcm_params.h>
  13#include <linux/pm_runtime.h>
  14#include <sound/soc.h>
  15#include <linux/soundwire/sdw_registers.h>
  16#include <linux/soundwire/sdw.h>
  17#include <linux/soundwire/sdw_intel.h>
  18#include "cadence_master.h"
  19#include "bus.h"
  20#include "intel.h"
  21
  22
  23enum intel_pdi_type {
  24	INTEL_PDI_IN = 0,
  25	INTEL_PDI_OUT = 1,
  26	INTEL_PDI_BD = 2,
  27};
  28
  29#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
  30
  31/*
  32 * Read, write helpers for HW registers
  33 */
  34static inline int intel_readl(void __iomem *base, int offset)
  35{
  36	return readl(base + offset);
  37}
  38
  39static inline void intel_writel(void __iomem *base, int offset, int value)
  40{
  41	writel(value, base + offset);
  42}
  43
  44static inline u16 intel_readw(void __iomem *base, int offset)
  45{
  46	return readw(base + offset);
  47}
  48
  49static inline void intel_writew(void __iomem *base, int offset, u16 value)
  50{
  51	writew(value, base + offset);
  52}
  53
  54static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
  55{
  56	int timeout = 10;
  57	u32 reg_read;
  58
  59	do {
  60		reg_read = readl(base + offset);
  61		if ((reg_read & mask) == target)
  62			return 0;
  63
  64		timeout--;
  65		usleep_range(50, 100);
  66	} while (timeout != 0);
  67
  68	return -EAGAIN;
  69}
  70
  71static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
  72{
  73	writel(value, base + offset);
  74	return intel_wait_bit(base, offset, mask, 0);
  75}
  76
  77static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
  78{
  79	writel(value, base + offset);
  80	return intel_wait_bit(base, offset, mask, mask);
  81}
  82
  83/*
  84 * debugfs
  85 */
  86#ifdef CONFIG_DEBUG_FS
  87
  88#define RD_BUF (2 * PAGE_SIZE)
  89
  90static ssize_t intel_sprintf(void __iomem *mem, bool l,
  91			     char *buf, size_t pos, unsigned int reg)
  92{
  93	int value;
  94
  95	if (l)
  96		value = intel_readl(mem, reg);
  97	else
  98		value = intel_readw(mem, reg);
  99
 100	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
 101}
 102
 103static int intel_reg_show(struct seq_file *s_file, void *data)
 104{
 105	struct sdw_intel *sdw = s_file->private;
 106	void __iomem *s = sdw->link_res->shim;
 107	void __iomem *a = sdw->link_res->alh;
 108	char *buf;
 109	ssize_t ret;
 110	int i, j;
 111	unsigned int links, reg;
 112
 113	buf = kzalloc(RD_BUF, GFP_KERNEL);
 114	if (!buf)
 115		return -ENOMEM;
 116
 117	links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
 118
 119	ret = scnprintf(buf, RD_BUF, "Register  Value\n");
 120	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
 121
 122	for (i = 0; i < links; i++) {
 123		reg = SDW_SHIM_LCAP + i * 4;
 124		ret += intel_sprintf(s, true, buf, ret, reg);
 125	}
 126
 127	for (i = 0; i < links; i++) {
 128		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
 129		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
 130		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
 131		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
 132		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
 133		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
 134		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
 135
 136		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
 137
 138		/*
 139		 * the value 10 is the number of PDIs. We will need a
 140		 * cleanup to remove hard-coded Intel configurations
 141		 * from cadence_master.c
 142		 */
 143		for (j = 0; j < 10; j++) {
 144			ret += intel_sprintf(s, false, buf, ret,
 145					SDW_SHIM_PCMSYCHM(i, j));
 146			ret += intel_sprintf(s, false, buf, ret,
 147					SDW_SHIM_PCMSYCHC(i, j));
 148		}
 149		ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
 150
 151		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
 152		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
 153	}
 154
 155	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
 156	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
 157	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
 158
 159	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
 160	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
 161		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
 162
 163	seq_printf(s_file, "%s", buf);
 164	kfree(buf);
 165
 166	return 0;
 167}
 168DEFINE_SHOW_ATTRIBUTE(intel_reg);
 169
 170static int intel_set_m_datamode(void *data, u64 value)
 171{
 172	struct sdw_intel *sdw = data;
 173	struct sdw_bus *bus = &sdw->cdns.bus;
 174
 175	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 176		return -EINVAL;
 177
 178	/* Userspace changed the hardware state behind the kernel's back */
 179	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 180
 181	bus->params.m_data_mode = value;
 182
 183	return 0;
 184}
 185DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
 186			 intel_set_m_datamode, "%llu\n");
 187
 188static int intel_set_s_datamode(void *data, u64 value)
 189{
 190	struct sdw_intel *sdw = data;
 191	struct sdw_bus *bus = &sdw->cdns.bus;
 192
 193	if (value > SDW_PORT_DATA_MODE_STATIC_1)
 194		return -EINVAL;
 195
 196	/* Userspace changed the hardware state behind the kernel's back */
 197	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 198
 199	bus->params.s_data_mode = value;
 200
 201	return 0;
 202}
 203DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
 204			 intel_set_s_datamode, "%llu\n");
 205
 206static void intel_debugfs_init(struct sdw_intel *sdw)
 207{
 208	struct dentry *root = sdw->cdns.bus.debugfs;
 209
 210	if (!root)
 211		return;
 212
 213	sdw->debugfs = debugfs_create_dir("intel-sdw", root);
 214
 215	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
 216			    &intel_reg_fops);
 217
 218	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
 219			    &intel_set_m_datamode_fops);
 220
 221	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
 222			    &intel_set_s_datamode_fops);
 223
 224	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
 225}
 226
 227static void intel_debugfs_exit(struct sdw_intel *sdw)
 228{
 229	debugfs_remove_recursive(sdw->debugfs);
 230}
 231#else
 232static void intel_debugfs_init(struct sdw_intel *sdw) {}
 233static void intel_debugfs_exit(struct sdw_intel *sdw) {}
 234#endif /* CONFIG_DEBUG_FS */
 235
 236/*
 237 * shim ops
 238 */
 239/* this needs to be called with shim_lock */
 240static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
 241{
 242	void __iomem *shim = sdw->link_res->shim;
 243	unsigned int link_id = sdw->instance;
 244	u16 ioctl;
 245
 246	/* Switch to MIP from Glue logic */
 247	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
 248
 249	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
 250	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 251	usleep_range(10, 15);
 252
 253	ioctl &= ~(SDW_SHIM_IOCTL_DO);
 254	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 255	usleep_range(10, 15);
 256
 257	ioctl |= (SDW_SHIM_IOCTL_MIF);
 258	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 259	usleep_range(10, 15);
 260
 261	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
 262	ioctl &= ~(SDW_SHIM_IOCTL_COE);
 263	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 264	usleep_range(10, 15);
 265
 266	/* at this point Master IP has full control of the I/Os */
 267}
 268
 269/* this needs to be called with shim_lock */
 270static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
 271{
 272	unsigned int link_id = sdw->instance;
 273	void __iomem *shim = sdw->link_res->shim;
 274	u16 ioctl;
 275
 276	/* Glue logic */
 277	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
 278	ioctl |= SDW_SHIM_IOCTL_BKE;
 279	ioctl |= SDW_SHIM_IOCTL_COE;
 280	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 281	usleep_range(10, 15);
 282
 283	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
 284	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 285	usleep_range(10, 15);
 286
 287	/* at this point Integration Glue has full control of the I/Os */
 288}
 289
 290/* this needs to be called with shim_lock */
 291static void intel_shim_init(struct sdw_intel *sdw)
 292{
 293	void __iomem *shim = sdw->link_res->shim;
 294	unsigned int link_id = sdw->instance;
 295	u16 ioctl = 0, act = 0;
 296
 297	/* Initialize Shim */
 298	ioctl |= SDW_SHIM_IOCTL_BKE;
 299	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 300	usleep_range(10, 15);
 301
 302	ioctl |= SDW_SHIM_IOCTL_WPDD;
 303	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 304	usleep_range(10, 15);
 305
 306	ioctl |= SDW_SHIM_IOCTL_DO;
 307	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 308	usleep_range(10, 15);
 309
 310	ioctl |= SDW_SHIM_IOCTL_DOE;
 311	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
 312	usleep_range(10, 15);
 313
 314	intel_shim_glue_to_master_ip(sdw);
 315
 
 316	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
 317	act |= SDW_SHIM_CTMCTL_DACTQE;
 318	act |= SDW_SHIM_CTMCTL_DODS;
 319	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
 320	usleep_range(10, 15);
 321}
 322
 323static int intel_shim_check_wake(struct sdw_intel *sdw)
 324{
 325	void __iomem *shim;
 326	u16 wake_sts;
 327
 328	shim = sdw->link_res->shim;
 329	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 330
 331	return wake_sts & BIT(sdw->instance);
 332}
 333
 334static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
 335{
 336	void __iomem *shim = sdw->link_res->shim;
 337	unsigned int link_id = sdw->instance;
 338	u16 wake_en, wake_sts;
 339
 340	mutex_lock(sdw->link_res->shim_lock);
 341	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
 342
 343	if (wake_enable) {
 344		/* Enable the wakeup */
 345		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
 346		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 347	} else {
 348		/* Disable the wake up interrupt */
 349		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
 350		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
 351
 352		/* Clear wake status */
 353		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
 354		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
 355		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
 356	}
 357	mutex_unlock(sdw->link_res->shim_lock);
 358}
 359
 
 
 
 
 
 
 
 
 
 360static int intel_link_power_up(struct sdw_intel *sdw)
 361{
 362	unsigned int link_id = sdw->instance;
 363	void __iomem *shim = sdw->link_res->shim;
 364	u32 *shim_mask = sdw->link_res->shim_mask;
 365	struct sdw_bus *bus = &sdw->cdns.bus;
 366	struct sdw_master_prop *prop = &bus->prop;
 367	u32 spa_mask, cpa_mask;
 368	u32 link_control;
 369	int ret = 0;
 
 370	u32 syncprd;
 371	u32 sync_reg;
 
 372
 373	mutex_lock(sdw->link_res->shim_lock);
 374
 375	/*
 376	 * The hardware relies on an internal counter, typically 4kHz,
 377	 * to generate the SoundWire SSP - which defines a 'safe'
 378	 * synchronization point between commands and audio transport
 379	 * and allows for multi link synchronization. The SYNCPRD value
 380	 * is only dependent on the oscillator clock provided to
 381	 * the IP, so adjust based on _DSD properties reported in DSDT
 382	 * tables. The values reported are based on either 24MHz
 383	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
 
 384	 */
 385	if (prop->mclk_freq % 6000000)
 386		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
 387	else
 388		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 389
 390	if (!*shim_mask) {
 391		dev_dbg(sdw->cdns.dev, "powering up all links\n");
 392
 393		/* we first need to program the SyncPRD/CPU registers */
 394		dev_dbg(sdw->cdns.dev,
 395			"first link up, programming SYNCPRD\n");
 396
 397		/* set SyncPRD period */
 398		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 399		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
 400
 401		/* Set SyncCPU bit */
 402		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
 403		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 404
 405		/* Link power up sequence */
 406		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 407
 408		/* only power-up enabled links */
 409		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
 410		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 411
 412		link_control |=  spa_mask;
 413
 414		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 415		if (ret < 0) {
 416			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
 417			goto out;
 418		}
 419
 420		/* SyncCPU will change once link is active */
 421		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
 422				     SDW_SHIM_SYNC_SYNCCPU, 0);
 423		if (ret < 0) {
 424			dev_err(sdw->cdns.dev,
 425				"Failed to set SHIM_SYNC: %d\n", ret);
 426			goto out;
 427		}
 
 
 
 
 
 
 
 428	}
 429
 430	*shim_mask |= BIT(link_id);
 431
 432	sdw->cdns.link_up = true;
 433
 434	intel_shim_init(sdw);
 435
 436out:
 437	mutex_unlock(sdw->link_res->shim_lock);
 438
 439	return ret;
 440}
 441
 442static int intel_link_power_down(struct sdw_intel *sdw)
 443{
 444	u32 link_control, spa_mask, cpa_mask;
 445	unsigned int link_id = sdw->instance;
 446	void __iomem *shim = sdw->link_res->shim;
 447	u32 *shim_mask = sdw->link_res->shim_mask;
 448	int ret = 0;
 449
 450	mutex_lock(sdw->link_res->shim_lock);
 451
 452	if (!(*shim_mask & BIT(link_id)))
 453		dev_err(sdw->cdns.dev,
 454			"%s: Unbalanced power-up/down calls\n", __func__);
 455
 456	sdw->cdns.link_up = false;
 457
 458	intel_shim_master_ip_to_glue(sdw);
 459
 460	*shim_mask &= ~BIT(link_id);
 461
 462	if (!*shim_mask) {
 463
 464		dev_dbg(sdw->cdns.dev, "powering down all links\n");
 465
 466		/* Link power down sequence */
 467		link_control = intel_readl(shim, SDW_SHIM_LCTL);
 468
 469		/* only power-down enabled links */
 470		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
 471		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
 472
 473		link_control &=  spa_mask;
 474
 475		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
 476		if (ret < 0) {
 477			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
 478
 479			/*
 480			 * we leave the sdw->cdns.link_up flag as false since we've disabled
 481			 * the link at this point and cannot handle interrupts any longer.
 482			 */
 483		}
 484	}
 485
 486	mutex_unlock(sdw->link_res->shim_lock);
 487
 488	return ret;
 489}
 490
 491static void intel_shim_sync_arm(struct sdw_intel *sdw)
 492{
 493	void __iomem *shim = sdw->link_res->shim;
 494	u32 sync_reg;
 495
 496	mutex_lock(sdw->link_res->shim_lock);
 497
 498	/* update SYNC register */
 499	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 500	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
 501	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
 502
 503	mutex_unlock(sdw->link_res->shim_lock);
 504}
 505
 506static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
 507{
 508	void __iomem *shim = sdw->link_res->shim;
 509	u32 sync_reg;
 510	int ret;
 511
 512	/* Read SYNC register */
 513	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 514
 515	/*
 516	 * Set SyncGO bit to synchronously trigger a bank switch for
 517	 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
 518	 * the Masters.
 519	 */
 520	sync_reg |= SDW_SHIM_SYNC_SYNCGO;
 521
 522	ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
 523			      SDW_SHIM_SYNC_SYNCGO);
 524
 525	if (ret < 0)
 526		dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
 527
 528	return ret;
 529}
 530
 531static int intel_shim_sync_go(struct sdw_intel *sdw)
 532{
 533	int ret;
 534
 535	mutex_lock(sdw->link_res->shim_lock);
 536
 537	ret = intel_shim_sync_go_unlocked(sdw);
 538
 539	mutex_unlock(sdw->link_res->shim_lock);
 540
 541	return ret;
 542}
 543
 544/*
 545 * PDI routines
 546 */
 547static void intel_pdi_init(struct sdw_intel *sdw,
 548			   struct sdw_cdns_stream_config *config)
 549{
 550	void __iomem *shim = sdw->link_res->shim;
 551	unsigned int link_id = sdw->instance;
 552	int pcm_cap;
 553
 554	/* PCM Stream Capability */
 555	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
 556
 557	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
 558	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
 559	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
 560
 561	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
 562		config->pcm_bd, config->pcm_in, config->pcm_out);
 563}
 564
 565static int
 566intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
 567{
 568	void __iomem *shim = sdw->link_res->shim;
 569	unsigned int link_id = sdw->instance;
 570	int count;
 571
 572	count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
 573
 574	/*
 575	 * WORKAROUND: on all existing Intel controllers, pdi
 576	 * number 2 reports channel count as 1 even though it
 577	 * supports 8 channels. Performing hardcoding for pdi
 578	 * number 2.
 579	 */
 580	if (pdi_num == 2)
 581		count = 7;
 582
 583	/* zero based values for channel count in register */
 584	count++;
 585
 586	return count;
 587}
 588
 589static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
 590				   struct sdw_cdns_pdi *pdi,
 591				   unsigned int num_pdi,
 592				   unsigned int *num_ch)
 593{
 594	int i, ch_count = 0;
 595
 596	for (i = 0; i < num_pdi; i++) {
 597		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
 598		ch_count += pdi->ch_count;
 599		pdi++;
 600	}
 601
 602	*num_ch = ch_count;
 603	return 0;
 604}
 605
 606static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
 607				      struct sdw_cdns_streams *stream)
 608{
 609	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
 610				&stream->num_ch_bd);
 611
 612	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
 613				&stream->num_ch_in);
 614
 615	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
 616				&stream->num_ch_out);
 617
 618	return 0;
 619}
 620
 621static int intel_pdi_ch_update(struct sdw_intel *sdw)
 622{
 623	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
 624
 625	return 0;
 626}
 627
 628static void
 629intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 630{
 631	void __iomem *shim = sdw->link_res->shim;
 632	unsigned int link_id = sdw->instance;
 633	int pdi_conf = 0;
 634
 635	/* the Bulk and PCM streams are not contiguous */
 636	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 637	if (pdi->num >= 2)
 638		pdi->intel_alh_id += 2;
 639
 640	/*
 641	 * Program stream parameters to stream SHIM register
 642	 * This is applicable for PCM stream only.
 643	 */
 644	if (pdi->type != SDW_STREAM_PCM)
 645		return;
 646
 647	if (pdi->dir == SDW_DATA_DIR_RX)
 648		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
 649	else
 650		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
 651
 652	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
 653	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
 654	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
 655
 656	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
 657}
 658
 659static void
 660intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
 661{
 662	void __iomem *alh = sdw->link_res->alh;
 663	unsigned int link_id = sdw->instance;
 664	unsigned int conf;
 665
 666	/* the Bulk and PCM streams are not contiguous */
 667	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
 668	if (pdi->num >= 2)
 669		pdi->intel_alh_id += 2;
 670
 671	/* Program Stream config ALH register */
 672	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
 673
 674	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
 675	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
 676
 677	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
 678}
 679
 680static int intel_params_stream(struct sdw_intel *sdw,
 681			       int stream,
 682			       struct snd_soc_dai *dai,
 683			       struct snd_pcm_hw_params *hw_params,
 684			       int link_id, int alh_stream_id)
 685{
 686	struct sdw_intel_link_res *res = sdw->link_res;
 687	struct sdw_intel_stream_params_data params_data;
 688
 689	params_data.stream = stream; /* direction */
 690	params_data.dai = dai;
 691	params_data.hw_params = hw_params;
 692	params_data.link_id = link_id;
 693	params_data.alh_stream_id = alh_stream_id;
 694
 695	if (res->ops && res->ops->params_stream && res->dev)
 696		return res->ops->params_stream(res->dev,
 697					       &params_data);
 698	return -EIO;
 699}
 700
 
 
 
 
 701static int intel_free_stream(struct sdw_intel *sdw,
 702			     int stream,
 703			     struct snd_soc_dai *dai,
 704			     int link_id)
 705{
 706	struct sdw_intel_link_res *res = sdw->link_res;
 707	struct sdw_intel_stream_free_data free_data;
 708
 709	free_data.stream = stream; /* direction */
 710	free_data.dai = dai;
 711	free_data.link_id = link_id;
 712
 713	if (res->ops && res->ops->free_stream && res->dev)
 714		return res->ops->free_stream(res->dev,
 715					     &free_data);
 716
 717	return 0;
 718}
 719
 720/*
 721 * bank switch routines
 722 */
 723
 724static int intel_pre_bank_switch(struct sdw_intel *sdw)
 725{
 726	struct sdw_cdns *cdns = &sdw->cdns;
 727	struct sdw_bus *bus = &cdns->bus;
 728
 729	/* Write to register only for multi-link */
 730	if (!bus->multi_link)
 731		return 0;
 732
 733	intel_shim_sync_arm(sdw);
 734
 735	return 0;
 736}
 737
 738static int intel_post_bank_switch(struct sdw_intel *sdw)
 739{
 740	struct sdw_cdns *cdns = &sdw->cdns;
 741	struct sdw_bus *bus = &cdns->bus;
 742	void __iomem *shim = sdw->link_res->shim;
 743	int sync_reg, ret;
 744
 745	/* Write to register only for multi-link */
 746	if (!bus->multi_link)
 747		return 0;
 748
 749	mutex_lock(sdw->link_res->shim_lock);
 750
 751	/* Read SYNC register */
 752	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
 753
 754	/*
 755	 * post_bank_switch() ops is called from the bus in a loop for
 756	 * all the Masters in the stream, with the expectation that we
 757	 * trigger the bank switch only for the first Master in the list
 758	 * and do nothing for the other Masters.
 759	 *
 760	 * So, set the SYNCGO bit only if the CMDSYNC bit is set for any Master.
 761	 */
 762	if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
 763		ret = 0;
 764		goto unlock;
 765	}
 766
 767	ret = intel_shim_sync_go_unlocked(sdw);
 768unlock:
 769	mutex_unlock(sdw->link_res->shim_lock);
 770
 771	if (ret < 0)
 772		dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
 773
 774	return ret;
 775}
 776
 777/*
 778 * DAI routines
 779 */
 780
 781static int intel_startup(struct snd_pcm_substream *substream,
 782			 struct snd_soc_dai *dai)
 783{
 784	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 785	int ret;
 786
 787	ret = pm_runtime_resume_and_get(cdns->dev);
 788	if (ret < 0 && ret != -EACCES) {
 789		dev_err_ratelimited(cdns->dev,
 790				    "pm_runtime_resume_and_get failed in %s, ret %d\n",
 791				    __func__, ret);
 792		return ret;
 793	}
 794	return 0;
 795}
 796
 797static int intel_hw_params(struct snd_pcm_substream *substream,
 798			   struct snd_pcm_hw_params *params,
 799			   struct snd_soc_dai *dai)
 800{
 801	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 802	struct sdw_intel *sdw = cdns_to_intel(cdns);
 803	struct sdw_cdns_dai_runtime *dai_runtime;
 804	struct sdw_cdns_pdi *pdi;
 805	struct sdw_stream_config sconfig;
 806	struct sdw_port_config *pconfig;
 807	int ch, dir;
 808	int ret;
 809
 810	dai_runtime = cdns->dai_runtime_array[dai->id];
 811	if (!dai_runtime)
 812		return -EIO;
 813
 814	ch = params_channels(params);
 815	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 816		dir = SDW_DATA_DIR_RX;
 817	else
 818		dir = SDW_DATA_DIR_TX;
 819
 820	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
 821
 822	if (!pdi) {
 823		ret = -EINVAL;
 824		goto error;
 825	}
 826
 827	/* do run-time configurations for SHIM, ALH and PDI/PORT */
 828	intel_pdi_shim_configure(sdw, pdi);
 829	intel_pdi_alh_configure(sdw, pdi);
 830	sdw_cdns_config_stream(cdns, ch, dir, pdi);
 831
 832	/* store pdi and hw_params, may be needed in prepare step */
 833	dai_runtime->paused = false;
 834	dai_runtime->suspended = false;
 835	dai_runtime->pdi = pdi;
 836	dai_runtime->hw_params = params;
 837
 838	/* Inform DSP about PDI stream number */
 839	ret = intel_params_stream(sdw, substream->stream, dai, params,
 840				  sdw->instance,
 841				  pdi->intel_alh_id);
 842	if (ret)
 843		goto error;
 844
 845	sconfig.direction = dir;
 846	sconfig.ch_count = ch;
 847	sconfig.frame_rate = params_rate(params);
 848	sconfig.type = dai_runtime->stream_type;
 849
 850	sconfig.bps = snd_pcm_format_width(params_format(params));
 851
 852	/* Port configuration */
 853	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
 854	if (!pconfig) {
 855		ret = -ENOMEM;
 856		goto error;
 857	}
 858
 859	pconfig->num = pdi->num;
 860	pconfig->ch_mask = (1 << ch) - 1;
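	/* ch_mask covers the ch lowest channels, e.g. ch = 2 -> ch_mask = 0x3 */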
 861
 862	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
 863				    pconfig, 1, dai_runtime->stream);
 864	if (ret)
 865		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
 866
 867	kfree(pconfig);
 868error:
 869	return ret;
 870}
 871
 872static int intel_prepare(struct snd_pcm_substream *substream,
 873			 struct snd_soc_dai *dai)
 874{
 875	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 876	struct sdw_intel *sdw = cdns_to_intel(cdns);
 877	struct sdw_cdns_dai_runtime *dai_runtime;
 878	int ch, dir;
 879	int ret = 0;
 880
 881	dai_runtime = cdns->dai_runtime_array[dai->id];
 882	if (!dai_runtime) {
 883		dev_err(dai->dev, "failed to get dai runtime in %s\n",
 884			__func__);
 885		return -EIO;
 886	}
 887
 888	if (dai_runtime->suspended) {
 889		dai_runtime->suspended = false;
 890
 891		/*
 892		 * .prepare() is called after system resume, where we
 893		 * need to reinitialize the SHIM/ALH/Cadence IP.
 894		 * .prepare() is also called to deal with underflows,
 895		 * but in those cases we cannot touch ALH/SHIM
 896		 * registers.
 897		 */
 898
 899		/* configure stream */
 900		ch = params_channels(dai_runtime->hw_params);
 901		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 902			dir = SDW_DATA_DIR_RX;
 903		else
 904			dir = SDW_DATA_DIR_TX;
 905
 906		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
 907		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
 908		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
 909
 910		/* Inform DSP about PDI stream number */
 911		ret = intel_params_stream(sdw, substream->stream, dai,
 912					  dai_runtime->hw_params,
 913					  sdw->instance,
 914					  dai_runtime->pdi->intel_alh_id);
 915	}
 916
 917	return ret;
 918}
 919
 920static int
 921intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
 922{
 923	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 924	struct sdw_intel *sdw = cdns_to_intel(cdns);
 925	struct sdw_cdns_dai_runtime *dai_runtime;
 926	int ret;
 927
 928	dai_runtime = cdns->dai_runtime_array[dai->id];
 929	if (!dai_runtime)
 930		return -EIO;
 931
 932	/*
 933	 * The sdw stream state will transition to RELEASED when stream->
 934	 * master_list is empty. So the stream state will transition to
 935	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
 936	 * cpu-dai.
 937	 */
 938	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
 939	if (ret < 0) {
 940		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
 941			dai_runtime->stream->name, ret);
 942		return ret;
 943	}
 944
 945	ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
 946	if (ret < 0) {
 947		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
 948		return ret;
 949	}
 950
 951	dai_runtime->hw_params = NULL;
 952	dai_runtime->pdi = NULL;
 953
 954	return 0;
 955}
 956
 957static void intel_shutdown(struct snd_pcm_substream *substream,
 958			   struct snd_soc_dai *dai)
 959{
 960	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 961
 962	pm_runtime_mark_last_busy(cdns->dev);
 963	pm_runtime_put_autosuspend(cdns->dev);
 964}
 965
 966static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
 967				    void *stream, int direction)
 968{
 969	return cdns_set_sdw_stream(dai, stream, direction);
 970}
 971
 972static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
 973				  int direction)
 974{
 975	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 976	struct sdw_cdns_dai_runtime *dai_runtime;
 977
 978	dai_runtime = cdns->dai_runtime_array[dai->id];
 979	if (!dai_runtime)
 980		return ERR_PTR(-EINVAL);
 981
 982	return dai_runtime->stream;
 983}
 984
 985static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
 986{
 987	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
 988	struct sdw_intel *sdw = cdns_to_intel(cdns);
 989	struct sdw_intel_link_res *res = sdw->link_res;
 990	struct sdw_cdns_dai_runtime *dai_runtime;
 991	int ret = 0;
 992
 993	/*
 994	 * The .trigger callback is used to send required IPC to audio
 995	 * firmware. The .free_stream callback will still be called
 996	 * by intel_free_stream() in the TRIGGER_SUSPEND case.
 997	 */
 998	if (res->ops && res->ops->trigger)
 999		res->ops->trigger(dai, cmd, substream->stream);
1000
1001	dai_runtime = cdns->dai_runtime_array[dai->id];
1002	if (!dai_runtime) {
1003		dev_err(dai->dev, "failed to get dai runtime in %s\n",
1004			__func__);
1005		return -EIO;
1006	}
1007
1008	switch (cmd) {
1009	case SNDRV_PCM_TRIGGER_SUSPEND:
1010
1011		/*
1012		 * The .prepare callback is used to deal with xruns and resume operations.
1013		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
1014		 * but for resume operations the DMAs and SHIM registers need to be initialized.
1015		 * The .trigger callback is used to track the suspend case only.
1016		 */
1017
1018		dai_runtime->suspended = true;
1019
1020		ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
1021		break;
1022
1023	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1024		dai_runtime->paused = true;
1025		break;
1026	case SNDRV_PCM_TRIGGER_STOP:
1027	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1028		dai_runtime->paused = false;
1029		break;
1030	default:
1031		break;
1032	}
1033
1034	return ret;
1035}
1036
1037static int intel_component_probe(struct snd_soc_component *component)
1038{
1039	int ret;
1040
1041	/*
1042	 * Make sure the device is pm_runtime_active before initiating
1043	 * bus transactions during the card registration.
1044	 * We use pm_runtime_resume() here rather than taking a
1045	 * reference and releasing it immediately.
1046	 */
1047	ret = pm_runtime_resume(component->dev);
1048	if (ret < 0 && ret != -EACCES)
1049		return ret;
1050
1051	return 0;
1052}
1053
1054static int intel_component_dais_suspend(struct snd_soc_component *component)
1055{
1056	struct snd_soc_dai *dai;
1057
1058	/*
1059	 * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
1060	 * does not issue TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
1061	 * Since the component suspend is called last, we can trap this corner case
1062	 * and force the DAIs to release their resources.
1063	 */
1064	for_each_component_dais(component, dai) {
1065		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1066		struct sdw_intel *sdw = cdns_to_intel(cdns);
1067		struct sdw_cdns_dai_runtime *dai_runtime;
1068		int ret;
1069
1070		dai_runtime = cdns->dai_runtime_array[dai->id];
1071
1072		if (!dai_runtime)
1073			continue;
1074
1075		if (dai_runtime->suspended)
1076			continue;
1077
1078		if (dai_runtime->paused) {
1079			dai_runtime->suspended = true;
1080
1081			ret = intel_free_stream(sdw, dai_runtime->direction, dai, sdw->instance);
1082			if (ret < 0)
1083				return ret;
1084		}
1085	}
1086
1087	return 0;
1088}
1089
1090static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1091	.startup = intel_startup,
1092	.hw_params = intel_hw_params,
1093	.prepare = intel_prepare,
1094	.hw_free = intel_hw_free,
1095	.trigger = intel_trigger,
1096	.shutdown = intel_shutdown,
1097	.set_stream = intel_pcm_set_sdw_stream,
1098	.get_stream = intel_get_sdw_stream,
1099};
1100
1101static const struct snd_soc_component_driver dai_component = {
1102	.name			= "soundwire",
1103	.probe			= intel_component_probe,
1104	.suspend		= intel_component_dais_suspend,
1105	.legacy_dai_naming	= 1,
1106};
1107
1108static int intel_create_dai(struct sdw_cdns *cdns,
1109			    struct snd_soc_dai_driver *dais,
1110			    enum intel_pdi_type type,
1111			    u32 num, u32 off, u32 max_ch)
1112{
1113	int i;
1114
1115	if (num == 0)
1116		return 0;
1117
1118	/* TODO: Read supported rates/formats from hardware */
1119	for (i = off; i < (off + num); i++) {
1120		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1121					      "SDW%d Pin%d",
1122					      cdns->instance, i);
1123		if (!dais[i].name)
1124			return -ENOMEM;
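		/*
		 * e.g. with instance 0 this creates DAIs named "SDW0 Pin0",
		 * "SDW0 Pin1", ... across the IN, OUT and BD PDI ranges.
		 */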
1125
1126		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1127			dais[i].playback.channels_min = 1;
1128			dais[i].playback.channels_max = max_ch;
1129			dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1130			dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1131		}
1132
1133		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1134			dais[i].capture.channels_min = 1;
1135			dais[i].capture.channels_max = max_ch;
1136			dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1137			dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1138		}
1139
1140		dais[i].ops = &intel_pcm_dai_ops;
1141	}
1142
1143	return 0;
1144}
1145
1146static int intel_register_dai(struct sdw_intel *sdw)
1147{
1148	struct sdw_cdns_dai_runtime **dai_runtime_array;
1149	struct sdw_cdns_stream_config config;
1150	struct sdw_cdns *cdns = &sdw->cdns;
1151	struct sdw_cdns_streams *stream;
1152	struct snd_soc_dai_driver *dais;
1153	int num_dai, ret, off = 0;
1154
1155	/* Read the PDI config and initialize cadence PDI */
1156	intel_pdi_init(sdw, &config);
1157	ret = sdw_cdns_pdi_init(cdns, config);
1158	if (ret)
1159		return ret;
1160
1161	intel_pdi_ch_update(sdw);
1162
1163	/* DAIs are created based on total number of PDIs supported */
1164	num_dai = cdns->pcm.num_pdi;
1165
1166	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1167					 sizeof(struct sdw_cdns_dai_runtime *),
1168					 GFP_KERNEL);
1169	if (!dai_runtime_array)
1170		return -ENOMEM;
1171	cdns->dai_runtime_array = dai_runtime_array;
1172
1173	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1174	if (!dais)
1175		return -ENOMEM;
1176
1177	/* Create PCM DAIs */
1178	stream = &cdns->pcm;
1179
1180	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1181			       off, stream->num_ch_in);
1182	if (ret)
1183		return ret;
1184
1185	off += cdns->pcm.num_in;
1186	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1187			       off, stream->num_ch_out);
1188	if (ret)
1189		return ret;
1190
1191	off += cdns->pcm.num_out;
1192	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1193			       off, stream->num_ch_bd);
1194	if (ret)
1195		return ret;
1196
1197	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1198					       dais, num_dai);
1199}
1200
1201static int intel_start_bus(struct sdw_intel *sdw)
1202{
1203	struct device *dev = sdw->cdns.dev;
1204	struct sdw_cdns *cdns = &sdw->cdns;
1205	struct sdw_bus *bus = &cdns->bus;
1206	int ret;
1207
1208	ret = sdw_cdns_enable_interrupt(cdns, true);
1209	if (ret < 0) {
1210		dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1211		return ret;
1212	}
1213
1214	/*
1215	 * follow recommended programming flows to avoid timeouts when
1216	 * gsync is enabled
1217	 */
1218	if (bus->multi_link)
1219		intel_shim_sync_arm(sdw);
1220
1221	ret = sdw_cdns_init(cdns);
1222	if (ret < 0) {
1223		dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
1224		goto err_interrupt;
1225	}
1226
1227	ret = sdw_cdns_exit_reset(cdns);
1228	if (ret < 0) {
1229		dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
1230		goto err_interrupt;
1231	}
1232
1233	if (bus->multi_link) {
1234		ret = intel_shim_sync_go(sdw);
1235		if (ret < 0) {
1236			dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
1237			goto err_interrupt;
1238		}
1239	}
1240	sdw_cdns_check_self_clearing_bits(cdns, __func__,
1241					  true, INTEL_MASTER_RESET_ITERATIONS);
1242
1243	return 0;
1244
1245err_interrupt:
1246	sdw_cdns_enable_interrupt(cdns, false);
1247	return ret;
1248}
1249
1250static int intel_start_bus_after_reset(struct sdw_intel *sdw)
1251{
1252	struct device *dev = sdw->cdns.dev;
1253	struct sdw_cdns *cdns = &sdw->cdns;
1254	struct sdw_bus *bus = &cdns->bus;
1255	bool clock_stop0;
1256	int status;
1257	int ret;
1258
1259	/*
1260	 * An exception condition occurs for the CLK_STOP_BUS_RESET
1261	 * case if one or more masters remain active. In this condition,
1262	 * all the masters are powered on since they share the same power
1263	 * domain. The Master can preserve its context for clock stop0, so
1264	 * there is no need to clear the slave status and reset the bus.
1265	 */
1266	clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1267
1268	if (!clock_stop0) {
1269
1270		/*
1271		 * make sure all Slaves are tagged as UNATTACHED and
1272		 * provide reason for reinitialization
1273		 */
1274
1275		status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1276		sdw_clear_slave_status(bus, status);
1277
1278		ret = sdw_cdns_enable_interrupt(cdns, true);
1279		if (ret < 0) {
1280			dev_err(dev, "cannot enable interrupts during resume\n");
1281			return ret;
1282		}
1283
1284		/*
1285		 * follow recommended programming flows to avoid
1286		 * timeouts when gsync is enabled
1287		 */
1288		if (bus->multi_link)
1289			intel_shim_sync_arm(sdw);
1290
1291		/*
1292		 * Re-initialize the IP since it was powered-off
1293		 */
1294		sdw_cdns_init(&sdw->cdns);
1295
1296	} else {
1297		ret = sdw_cdns_enable_interrupt(cdns, true);
1298		if (ret < 0) {
1299			dev_err(dev, "cannot enable interrupts during resume\n");
1300			return ret;
1301		}
1302	}
1303
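	/*
	 * restart the bus clock; the second argument requests a full bus
	 * reset, which is only needed when the clock-stop context was not
	 * preserved (!clock_stop0)
	 */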
1304	ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1305	if (ret < 0) {
1306		dev_err(dev, "unable to restart clock during resume\n");
1307		goto err_interrupt;
1308	}
1309
1310	if (!clock_stop0) {
1311		ret = sdw_cdns_exit_reset(cdns);
1312		if (ret < 0) {
1313			dev_err(dev, "unable to exit bus reset sequence during resume\n");
1314			goto err_interrupt;
1315		}
1316
1317		if (bus->multi_link) {
1318			ret = intel_shim_sync_go(sdw);
1319			if (ret < 0) {
1320				dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1321				goto err_interrupt;
1322			}
1323		}
1324	}
1325	sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
1326
1327	return 0;
1328
1329err_interrupt:
1330	sdw_cdns_enable_interrupt(cdns, false);
1331	return ret;
1332}
1333
1334static void intel_check_clock_stop(struct sdw_intel *sdw)
1335{
1336	struct device *dev = sdw->cdns.dev;
1337	bool clock_stop0;
1338
1339	clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1340	if (!clock_stop0)
1341		dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1342}
1343
1344static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
1345{
1346	struct device *dev = sdw->cdns.dev;
1347	struct sdw_cdns *cdns = &sdw->cdns;
1348	int ret;
1349
1350	ret = sdw_cdns_enable_interrupt(cdns, true);
1351	if (ret < 0) {
1352		dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1353		return ret;
1354	}
1355
1356	ret = sdw_cdns_clock_restart(cdns, false);
1357	if (ret < 0) {
1358		dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
1359		sdw_cdns_enable_interrupt(cdns, false);
1360		return ret;
1361	}
1362
1363	sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
1364					  true, INTEL_MASTER_RESET_ITERATIONS);
1365
1366	return 0;
1367}
1368
1369static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
1370{
1371	struct device *dev = sdw->cdns.dev;
1372	struct sdw_cdns *cdns = &sdw->cdns;
1373	bool wake_enable = false;
1374	int ret;
1375
1376	if (clock_stop) {
1377		ret = sdw_cdns_clock_stop(cdns, true);
1378		if (ret < 0)
1379			dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
1380		else
1381			wake_enable = true;
1382	}
1383
1384	ret = sdw_cdns_enable_interrupt(cdns, false);
1385	if (ret < 0) {
1386		dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
1387		return ret;
1388	}
1389
1390	ret = intel_link_power_down(sdw);
1391	if (ret) {
1392		dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
1393		return ret;
1394	}
1395
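	/*
	 * wake detection is enabled only if the clock was successfully
	 * stopped above, presumably so that in-band wakes from peripherals
	 * can be detected while the link is suspended
	 */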
1396	intel_shim_wake(sdw, wake_enable);
1397
1398	return 0;
1399}
1400
1401const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1402	.debugfs_init = intel_debugfs_init,
1403	.debugfs_exit = intel_debugfs_exit,
1404
1405	.register_dai = intel_register_dai,
1406
1407	.check_clock_stop = intel_check_clock_stop,
1408	.start_bus = intel_start_bus,
1409	.start_bus_after_reset = intel_start_bus_after_reset,
1410	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1411	.stop_bus = intel_stop_bus,
1412
1413	.link_power_up = intel_link_power_up,
1414	.link_power_down = intel_link_power_down,
1415
1416	.shim_check_wake = intel_shim_check_wake,
1417	.shim_wake = intel_shim_wake,
1418
1419	.pre_bank_switch = intel_pre_bank_switch,
1420	.post_bank_switch = intel_post_bank_switch,
1421};
1422EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);
1423