Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2//
3// Apple SoCs MCA driver
4//
5// Copyright (C) The Asahi Linux Contributors
6//
7// The MCA peripheral is made up of a number of identical units called clusters.
8// Each cluster has its separate clock parent, SYNC signal generator, carries
9// four SERDES units and has a dedicated I2S port on the SoC's periphery.
10//
11// The clusters can operate independently, or can be combined together in a
12// configurable manner. We mostly treat them as self-contained independent
13// units and don't configure any cross-cluster connections except for the I2S
14// ports. The I2S ports can be routed to any of the clusters (irrespective
15// of their native cluster). We map this onto ASoC's (DPCM) notion of backend
16// and frontend DAIs. The 'cluster guts' are frontends which are dynamically
17// routed to backend I2S ports.
18//
19// DAI references in devicetree are resolved to backends. The routing between
20// frontends and backends is determined by the machine driver in the DAPM paths
21// it supplies.
22
23#include <linux/bitfield.h>
24#include <linux/clk.h>
25#include <linux/dma-mapping.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/of.h>
30#include <linux/of_clk.h>
31#include <linux/of_dma.h>
32#include <linux/platform_device.h>
33#include <linux/pm_domain.h>
34#include <linux/regmap.h>
35#include <linux/reset.h>
36#include <linux/slab.h>
37
38#include <sound/core.h>
39#include <sound/pcm.h>
40#include <sound/pcm_params.h>
41#include <sound/soc.h>
42#include <sound/dmaengine_pcm.h>
43
44#define USE_RXB_FOR_CAPTURE
45
46/* Relative to cluster base */
47#define REG_STATUS 0x0
48#define STATUS_MCLK_EN BIT(0)
49#define REG_MCLK_CONF 0x4
50#define MCLK_CONF_DIV GENMASK(11, 8)
51
52#define REG_SYNCGEN_STATUS 0x100
53#define SYNCGEN_STATUS_EN BIT(0)
54#define REG_SYNCGEN_MCLK_SEL 0x104
55#define SYNCGEN_MCLK_SEL GENMASK(3, 0)
56#define REG_SYNCGEN_HI_PERIOD 0x108
57#define REG_SYNCGEN_LO_PERIOD 0x10c
58
59#define REG_PORT_ENABLES 0x600
60#define PORT_ENABLES_CLOCKS GENMASK(2, 1)
61#define PORT_ENABLES_TX_DATA BIT(3)
62#define REG_PORT_CLOCK_SEL 0x604
63#define PORT_CLOCK_SEL GENMASK(11, 8)
64#define REG_PORT_DATA_SEL 0x608
65#define PORT_DATA_SEL_TXA(cl) (1 << ((cl)*2))
66#define PORT_DATA_SEL_TXB(cl) (2 << ((cl)*2))
67
68#define REG_INTSTATE 0x700
69#define REG_INTMASK 0x704
70
71/* Bases of serdes units (relative to cluster) */
72#define CLUSTER_RXA_OFF 0x200
73#define CLUSTER_TXA_OFF 0x300
74#define CLUSTER_RXB_OFF 0x400
75#define CLUSTER_TXB_OFF 0x500
76
77#define CLUSTER_TX_OFF CLUSTER_TXA_OFF
78
79#ifndef USE_RXB_FOR_CAPTURE
80#define CLUSTER_RX_OFF CLUSTER_RXA_OFF
81#else
82#define CLUSTER_RX_OFF CLUSTER_RXB_OFF
83#endif
84
85/* Relative to serdes unit base */
86#define REG_SERDES_STATUS 0x00
87#define SERDES_STATUS_EN BIT(0)
88#define SERDES_STATUS_RST BIT(1)
89#define REG_TX_SERDES_CONF 0x04
90#define REG_RX_SERDES_CONF 0x08
91#define SERDES_CONF_NCHANS GENMASK(3, 0)
92#define SERDES_CONF_WIDTH_MASK GENMASK(8, 4)
93#define SERDES_CONF_WIDTH_16BIT 0x40
94#define SERDES_CONF_WIDTH_20BIT 0x80
95#define SERDES_CONF_WIDTH_24BIT 0xc0
96#define SERDES_CONF_WIDTH_32BIT 0x100
97#define SERDES_CONF_BCLK_POL 0x400
98#define SERDES_CONF_LSB_FIRST 0x800
99#define SERDES_CONF_UNK1 BIT(12)
100#define SERDES_CONF_UNK2 BIT(13)
101#define SERDES_CONF_UNK3 BIT(14)
102#define SERDES_CONF_NO_DATA_FEEDBACK BIT(15)
103#define SERDES_CONF_SYNC_SEL GENMASK(18, 16)
104#define SERDES_CONF_SOME_RST BIT(19)
105#define REG_TX_SERDES_BITSTART 0x08
106#define REG_RX_SERDES_BITSTART 0x0c
107#define REG_TX_SERDES_SLOTMASK 0x0c
108#define REG_RX_SERDES_SLOTMASK 0x10
109#define REG_RX_SERDES_PORT 0x04
110
111/* Relative to switch base */
112#define REG_DMA_ADAPTER_A(cl) (0x8000 * (cl))
113#define REG_DMA_ADAPTER_B(cl) (0x8000 * (cl) + 0x4000)
114#define DMA_ADAPTER_TX_LSB_PAD GENMASK(4, 0)
115#define DMA_ADAPTER_TX_NCHANS GENMASK(6, 5)
116#define DMA_ADAPTER_RX_MSB_PAD GENMASK(12, 8)
117#define DMA_ADAPTER_RX_NCHANS GENMASK(14, 13)
118#define DMA_ADAPTER_NCHANS GENMASK(22, 20)
119
120#define SWITCH_STRIDE 0x8000
121#define CLUSTER_STRIDE 0x4000
122
123#define MAX_NCLUSTERS 6
124
125#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
126 SNDRV_PCM_FMTBIT_S24_LE | \
127 SNDRV_PCM_FMTBIT_S32_LE)
128
/* Per-cluster state; one instance per hardware cluster */
struct mca_cluster {
	int no;			/* Index of this cluster within the peripheral */
	__iomem void *base;	/* MMIO base of the cluster's register block */
	struct mca_data *host;	/* Back-pointer to driver-wide state */
	struct device *pd_dev;	/* Virtual device of the cluster's power domain */
	struct clk *clk_parent;	/* Parent clock feeding this cluster */
	struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];

	/* Per-direction flag: has BE startup run for this port? */
	bool port_started[SNDRV_PCM_STREAM_LAST + 1];
	int port_driver; /* The cluster driving this cluster's port (-1 if none) */

	/* Per-direction flag: this BE holds a claim on the FE's clocks */
	bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
	struct device_link *pd_link;	/* Link propping up pd_dev while clocked */

	unsigned int bclk_ratio;	/* From set_bclk_ratio; 0 = derive from TDM */

	/* Masks etc. picked up via the set_tdm_slot method */
	int tdm_slots;
	int tdm_slot_width;
	unsigned int tdm_tx_mask;
	unsigned int tdm_rx_mask;
};
151
/* Driver-wide state shared by all clusters */
struct mca_data {
	struct device *dev;

	__iomem void *switch_base;	/* MMIO base of the DMA adapter/switch block */

	struct device *pd_dev;		/* Power domain attached at index 0 in probe */
	struct reset_control *rstc;	/* Optional shared reset line */
	struct device_link *pd_link;	/* Link keeping pd_dev active for our lifetime */

	/* Mutex for accessing port_driver of foreign clusters */
	struct mutex port_mutex;

	int nclusters;
	struct mca_cluster clusters[];	/* Flexible array, one entry per cluster */
};
167
168static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
169{
170 __iomem void *ptr = cl->base + regoffset;
171 u32 newval;
172
173 newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
174 writel_relaxed(newval, ptr);
175}
176
177/*
178 * Get the cluster of FE or BE DAI
179 */
180static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
181{
182 struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
183 /*
184 * FE DAIs are 0 ... nclusters - 1
185 * BE DAIs are nclusters ... 2*nclusters - 1
186 */
187 int cluster_no = dai->id % mca->nclusters;
188
189 return &mca->clusters[cluster_no];
190}
191
/* called before PCM trigger */
static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
				 struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
	int serdes_conf =
		serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* Disable the SERDES while asserting its reset bit */
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_RST);
		/* Assert the 'some reset' bit in the SERDES config */
		mca_modify(cl, serdes_conf, SERDES_CONF_SOME_RST,
			   SERDES_CONF_SOME_RST);
		/* Read back, presumably to flush the posted write -- TODO confirm */
		readl_relaxed(cl->base + serdes_conf);
		/*
		 * NOTE(review): the mask below is SERDES_STATUS_RST (bit 1)
		 * although the register written is the *config* register;
		 * SERDES_CONF_SOME_RST (bit 19) asserted just above is never
		 * cleared here. Possibly intentional for this hardware --
		 * confirm before changing.
		 */
		mca_modify(cl, serdes_conf, SERDES_STATUS_RST, 0);
		WARN_ON(readl_relaxed(cl->base + REG_SERDES_STATUS) &
			SERDES_STATUS_RST);
		break;
	default:
		break;
	}
}
220
221static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
222 struct snd_soc_dai *dai)
223{
224 struct mca_cluster *cl = mca_dai_to_cluster(dai);
225 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
226 int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
227
228 switch (cmd) {
229 case SNDRV_PCM_TRIGGER_START:
230 case SNDRV_PCM_TRIGGER_RESUME:
231 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
232 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
233 SERDES_STATUS_EN | SERDES_STATUS_RST,
234 SERDES_STATUS_EN);
235 break;
236
237 case SNDRV_PCM_TRIGGER_STOP:
238 case SNDRV_PCM_TRIGGER_SUSPEND:
239 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
240 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
241 SERDES_STATUS_EN, 0);
242 break;
243
244 default:
245 return -EINVAL;
246 }
247
248 return 0;
249}
250
/*
 * Power up a FE cluster's clocking: enable the parent clock, prop up
 * the cluster's power domain, then ungate the SYNC generator and MCLK.
 * Returns 0 on success or a negative errno.
 */
static int mca_fe_enable_clocks(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	int ret;

	ret = clk_prepare_enable(cl->clk_parent);
	if (ret) {
		dev_err(mca->dev,
			"cluster %d: unable to enable clock parent: %d\n",
			cl->no, ret);
		return ret;
	}

	/*
	 * We can't power up the device earlier than this because
	 * the power state driver would error out on seeing the device
	 * as clock-gated.
	 */
	cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
				      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
					      DL_FLAG_RPM_ACTIVE);
	if (!cl->pd_link) {
		dev_err(mca->dev,
			"cluster %d: unable to prop-up power domain\n", cl->no);
		clk_disable_unprepare(cl->clk_parent);
		return -EINVAL;
	}

	/* MCLK selector appears to be 1-based (cluster no + 1) -- TODO confirm */
	writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
		   SYNCGEN_STATUS_EN);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);

	return 0;
}
286
/*
 * Undo mca_fe_enable_clocks(): gate the SYNC generator and MCLK, then
 * drop the power-domain link and the parent clock.
 */
static void mca_fe_disable_clocks(struct mca_cluster *cl)
{
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);

	device_link_del(cl->pd_link);
	clk_disable_unprepare(cl->clk_parent);
}
295
296static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
297{
298 struct mca_data *mca = cl->host;
299 struct mca_cluster *be_cl;
300 int stream, i;
301
302 mutex_lock(&mca->port_mutex);
303 for (i = 0; i < mca->nclusters; i++) {
304 be_cl = &mca->clusters[i];
305
306 if (be_cl->port_driver != cl->no)
307 continue;
308
309 for_each_pcm_streams(stream) {
310 if (be_cl->clocks_in_use[stream]) {
311 mutex_unlock(&mca->port_mutex);
312 return true;
313 }
314 }
315 }
316 mutex_unlock(&mca->port_mutex);
317 return false;
318}
319
/*
 * BE prepare: make sure the driving FE cluster's clocks are running and
 * record this BE's claim on them for the stream direction.
 */
static int mca_be_prepare(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;
	int ret;

	/* No FE has claimed this port yet */
	if (cl->port_driver < 0)
		return -EINVAL;

	fe_cl = &mca->clusters[cl->port_driver];

	/*
	 * Typically the CODECs we are paired with will require clocks
	 * to be present at time of unmute with the 'mute_stream' op
	 * or at time of DAPM widget power-up. We need to enable clocks
	 * here at the latest (frontend prepare would be too late).
	 */
	if (!mca_fe_clocks_in_use(fe_cl)) {
		ret = mca_fe_enable_clocks(fe_cl);
		if (ret < 0)
			return ret;
	}

	cl->clocks_in_use[substream->stream] = true;

	return 0;
}
349
/*
 * BE hw_free: drop this BE's claim on the driving FE cluster's clocks
 * and disable them once the last claim is gone.
 */
static int mca_be_hw_free(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;

	if (cl->port_driver < 0)
		return -EINVAL;

	/*
	 * We are operating on a foreign cluster here, but since we
	 * belong to the same PCM, accesses should have been
	 * synchronized at ASoC level.
	 */
	fe_cl = &mca->clusters[cl->port_driver];
	if (!mca_fe_clocks_in_use(fe_cl))
		return 0; /* Nothing to do */

	cl->clocks_in_use[substream->stream] = false;

	/* Last claim gone? Shut the FE's clocks down. */
	if (!mca_fe_clocks_in_use(fe_cl))
		mca_fe_disable_clocks(fe_cl);

	return 0;
}
376
/*
 * Trim @mask down to at most @nchans set bits by clearing the highest
 * set bits first.
 */
static unsigned int mca_crop_mask(unsigned int mask, int nchans)
{
	while (hweight32(mask) > nchans)
		mask &= ~(1U << __fls(mask));

	return mask;
}
384
/*
 * Configure one SERDES unit (TX or RX) of cluster @cl for the given TDM
 * layout: @slots slots of @slot_width bits with active-slot @mask, capped
 * to @nchans channels; RX additionally selects which I2S @port to listen
 * on. Returns 0 on success, -EINVAL for an unsupported configuration.
 */
static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
				unsigned int mask, int slots, int nchans,
				int slot_width, bool is_tx, int port)
{
	__iomem void *serdes_base = cl->base + serdes_unit;
	u32 serdes_conf, serdes_conf_mask;

	serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
	/* The NCHANS field holds 'slots - 1' (clamped to at least one slot) */
	serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
	switch (slot_width) {
	case 16:
		serdes_conf |= SERDES_CONF_WIDTH_16BIT;
		break;
	case 20:
		serdes_conf |= SERDES_CONF_WIDTH_20BIT;
		break;
	case 24:
		serdes_conf |= SERDES_CONF_WIDTH_24BIT;
		break;
	case 32:
		serdes_conf |= SERDES_CONF_WIDTH_32BIT;
		break;
	default:
		goto err;
	}

	/* Sync selector is 'cluster no + 1', as with the syncgen MCLK selector */
	serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
	serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);

	if (is_tx) {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_UNK3;
	} else {
		/* NOTE: UNK3 is in the mask but deliberately left clear on RX */
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3 |
				    SERDES_CONF_NO_DATA_FEEDBACK;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_NO_DATA_FEEDBACK;
	}

	mca_modify(cl,
		   serdes_unit +
			   (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
		   serdes_conf_mask, serdes_conf);

	if (is_tx) {
		/*
		 * Slot masks are written inverted (active slots as zero bits);
		 * the second TX register pair takes the uncropped mask. Exact
		 * register semantics are not fully known.
		 */
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
		writel_relaxed(~((u32)mask),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
	} else {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_RX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
		/* RX listens on a single I2S port, selected by a one-hot bit */
		writel_relaxed(1 << port,
			       serdes_base + REG_RX_SERDES_PORT);
	}

	return 0;

err:
	dev_err(cl->host->dev,
		"unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
		mask, slots, slot_width);
	return -EINVAL;
}
458
459static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
460 unsigned int rx_mask, int slots, int slot_width)
461{
462 struct mca_cluster *cl = mca_dai_to_cluster(dai);
463
464 cl->tdm_slots = slots;
465 cl->tdm_slot_width = slot_width;
466 cl->tdm_tx_mask = tx_mask;
467 cl->tdm_rx_mask = rx_mask;
468
469 return 0;
470}
471
/*
 * FE set_fmt: we must be both bit-clock and frame-clock provider, and
 * only I2S/left-justified formats are accepted. The combined frame
 * polarity (format plus inversion flags) must come out inverted, or the
 * request is rejected -- presumably a hardware constraint; confirm.
 */
static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	bool fpol_inv = false;
	u32 serdes_conf = 0;
	u32 bitstart;

	if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
	    SND_SOC_DAIFMT_BP_FP)
		goto err;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		fpol_inv = 0;
		bitstart = 1;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		fpol_inv = 1;
		bitstart = 0;
		break;
	default:
		goto err;
	}

	/* Fold the explicit frame-clock inversion into the format's polarity */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_IF:
	case SND_SOC_DAIFMT_IB_IF:
		fpol_inv ^= 1;
		break;
	}

	/* Normal (non-inverted) bit clock maps to the BCLK_POL bit */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
	case SND_SOC_DAIFMT_NB_IF:
		serdes_conf |= SERDES_CONF_BCLK_POL;
		break;
	}

	/* Only the inverted frame polarity is supported */
	if (!fpol_inv)
		goto err;

	mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);

	return 0;

err:
	dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
	return -EINVAL;
}
529
/* Record the requested BCLK/FSYNC ratio; consumed in mca_fe_hw_params() */
static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->bclk_ratio = ratio;

	return 0;
}
538
539static int mca_fe_get_port(struct snd_pcm_substream *substream)
540{
541 struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
542 struct snd_soc_pcm_runtime *be;
543 struct snd_soc_dpcm *dpcm;
544
545 be = NULL;
546 for_each_dpcm_be(fe, substream->stream, dpcm) {
547 be = dpcm->be;
548 break;
549 }
550
551 if (!be)
552 return -EINVAL;
553
554 return mca_dai_to_cluster(asoc_rtd_to_cpu(be, 0))->no;
555}
556
/*
 * FE hw_params: derive (or take over) the TDM layout, program the SERDES
 * and the DMA adapter, and -- unless clocks are already running -- set up
 * the FSYNC generator and the parent clock rate.
 */
static int mca_fe_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct device *dev = mca->dev;
	unsigned int samp_rate = params_rate(params);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	bool refine_tdm = false;
	unsigned long bclk_ratio;
	unsigned int tdm_slots, tdm_slot_width, tdm_mask;
	u32 regval, pad;
	int ret, port, nchans_ceiled;

	if (!cl->tdm_slot_width) {
		/*
		 * We were not given TDM settings from above, set initial
		 * guesses which will later be refined.
		 */
		tdm_slot_width = params_width(params);
		tdm_slots = params_channels(params);
		refine_tdm = true;
	} else {
		tdm_slot_width = cl->tdm_slot_width;
		tdm_slots = cl->tdm_slots;
		tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
	}

	/* An explicit set_bclk_ratio() request takes precedence */
	if (cl->bclk_ratio)
		bclk_ratio = cl->bclk_ratio;
	else
		bclk_ratio = tdm_slot_width * tdm_slots;

	if (refine_tdm) {
		int nchannels = params_channels(params);

		/* Without explicit TDM settings, only mono/stereo is handled */
		if (nchannels > 2) {
			dev_err(dev, "missing TDM for stream with two or more channels\n");
			return -EINVAL;
		}

		if ((bclk_ratio % nchannels) != 0) {
			dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
				bclk_ratio, nchannels);
			return -EINVAL;
		}

		tdm_slot_width = bclk_ratio / nchannels;

		/* Cap mono slots at the 32-bit maximum slot width */
		if (tdm_slot_width > 32 && nchannels == 1)
			tdm_slot_width = 32;

		if (tdm_slot_width < params_width(params)) {
			dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n",
				tdm_slot_width, params_width(params));
			return -EINVAL;
		}

		tdm_mask = (1 << tdm_slots) - 1;
	}

	port = mca_fe_get_port(substream);
	if (port < 0)
		return port;

	ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
				   tdm_mask, tdm_slots, params_channels(params),
				   tdm_slot_width, is_tx, port);
	if (ret)
		return ret;

	/* Samples are padded out to 32 bits within the DMA adapter */
	pad = 32 - params_width(params);

	/*
	 * TODO: Here the register semantics aren't clear.
	 */
	nchans_ceiled = min_t(int, params_channels(params), 4);
	regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
		 FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
		 FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);

#ifndef USE_RXB_FOR_CAPTURE
	writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
#else
	if (is_tx)
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
	else
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
#endif

	/* Don't disturb the clock chain if the other direction is live */
	if (!mca_fe_clocks_in_use(cl)) {
		/*
		 * Set up FSYNC duty cycle as even as possible.
		 */
		writel_relaxed((bclk_ratio / 2) - 1,
			       cl->base + REG_SYNCGEN_HI_PERIOD);
		writel_relaxed(((bclk_ratio + 1) / 2) - 1,
			       cl->base + REG_SYNCGEN_LO_PERIOD);
		writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
			       cl->base + REG_MCLK_CONF);

		ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
		if (ret) {
			dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
				cl->no, ret);
			return ret;
		}
	}

	return 0;
}
673
/* DAI ops for the frontend 'cluster guts' DAIs */
static const struct snd_soc_dai_ops mca_fe_ops = {
	.set_fmt = mca_fe_set_fmt,
	.set_bclk_ratio = mca_set_bclk_ratio,
	.set_tdm_slot = mca_fe_set_tdm_slot,
	.hw_params = mca_fe_hw_params,
	.trigger = mca_fe_trigger,
};
681
682static bool mca_be_started(struct mca_cluster *cl)
683{
684 int stream;
685
686 for_each_pcm_streams(stream)
687 if (cl->port_started[stream])
688 return true;
689 return false;
690}
691
/*
 * BE startup: resolve the single FE driving this BE via DPCM, then route
 * and enable this cluster's I2S port off that FE cluster's clocks/data.
 */
static int mca_be_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *be = asoc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *fe;
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_cluster *fe_cl;
	struct mca_data *mca = cl->host;
	struct snd_soc_dpcm *dpcm;

	fe = NULL;

	for_each_dpcm_fe(be, substream->stream, dpcm) {
		if (fe && dpcm->fe != fe) {
			dev_err(mca->dev, "many FE per one BE unsupported\n");
			return -EINVAL;
		}

		fe = dpcm->fe;
	}

	if (!fe)
		return -EINVAL;

	fe_cl = mca_dai_to_cluster(asoc_rtd_to_cpu(fe, 0));

	if (mca_be_started(cl)) {
		/*
		 * Port is already started in the other direction.
		 * Make sure there isn't a conflict with another cluster
		 * driving the port.
		 */
		if (cl->port_driver != fe_cl->no)
			return -EINVAL;

		cl->port_started[substream->stream] = true;
		return 0;
	}

	/* Clock selector is 1-based; data selector is one-hot per FE cluster */
	writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
		       cl->base + REG_PORT_ENABLES);
	writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
		       cl->base + REG_PORT_CLOCK_SEL);
	writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
		       cl->base + REG_PORT_DATA_SEL);
	/* port_driver of foreign clusters is read under port_mutex */
	mutex_lock(&mca->port_mutex);
	cl->port_driver = fe_cl->no;
	mutex_unlock(&mca->port_mutex);
	cl->port_started[substream->stream] = true;

	return 0;
}
744
/*
 * BE shutdown: mark this direction stopped; once the last direction is
 * gone, disable the port and release the port_driver claim.
 */
static void mca_be_shutdown(struct snd_pcm_substream *substream,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;

	cl->port_started[substream->stream] = false;

	if (!mca_be_started(cl)) {
		/*
		 * Were we the last direction to shutdown?
		 * Turn off the lights.
		 */
		writel_relaxed(0, cl->base + REG_PORT_ENABLES);
		writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
		mutex_lock(&mca->port_mutex);
		cl->port_driver = -1;
		mutex_unlock(&mca->port_mutex);
	}
}
765
/* DAI ops for the backend I2S-port DAIs */
static const struct snd_soc_dai_ops mca_be_ops = {
	.prepare = mca_be_prepare,
	.hw_free = mca_be_hw_free,
	.startup = mca_be_startup,
	.shutdown = mca_be_shutdown,
};
772
773static int mca_set_runtime_hwparams(struct snd_soc_component *component,
774 struct snd_pcm_substream *substream,
775 struct dma_chan *chan)
776{
777 struct device *dma_dev = chan->device->dev;
778 struct snd_dmaengine_dai_dma_data dma_data = {};
779 int ret;
780
781 struct snd_pcm_hardware hw;
782
783 memset(&hw, 0, sizeof(hw));
784
785 hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
786 SNDRV_PCM_INFO_INTERLEAVED;
787 hw.periods_min = 2;
788 hw.periods_max = UINT_MAX;
789 hw.period_bytes_min = 256;
790 hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
791 hw.buffer_bytes_max = SIZE_MAX;
792 hw.fifo_size = 16;
793
794 ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
795 &hw, chan);
796
797 if (ret)
798 return ret;
799
800 return snd_soc_set_runtime_hwparams(substream, &hw);
801}
802
803static int mca_pcm_open(struct snd_soc_component *component,
804 struct snd_pcm_substream *substream)
805{
806 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
807 struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
808 struct dma_chan *chan = cl->dma_chans[substream->stream];
809 int ret;
810
811 if (rtd->dai_link->no_pcm)
812 return 0;
813
814 ret = mca_set_runtime_hwparams(component, substream, chan);
815 if (ret)
816 return ret;
817
818 return snd_dmaengine_pcm_open(substream, chan);
819}
820
821static int mca_hw_params(struct snd_soc_component *component,
822 struct snd_pcm_substream *substream,
823 struct snd_pcm_hw_params *params)
824{
825 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
826 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
827 struct dma_slave_config slave_config;
828 int ret;
829
830 if (rtd->dai_link->no_pcm)
831 return 0;
832
833 memset(&slave_config, 0, sizeof(slave_config));
834 ret = snd_hwparams_to_dma_slave_config(substream, params,
835 &slave_config);
836 if (ret < 0)
837 return ret;
838
839 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
840 slave_config.dst_port_window_size =
841 min_t(u32, params_channels(params), 4);
842 else
843 slave_config.src_port_window_size =
844 min_t(u32, params_channels(params), 4);
845
846 return dmaengine_slave_config(chan, &slave_config);
847}
848
849static int mca_close(struct snd_soc_component *component,
850 struct snd_pcm_substream *substream)
851{
852 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
853
854 if (rtd->dai_link->no_pcm)
855 return 0;
856
857 return snd_dmaengine_pcm_close(substream);
858}
859
/* Component trigger: SERDES early-reset hook, then the dmaengine trigger */
static int mca_trigger(struct snd_soc_component *component,
		       struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	/*
	 * Before we do the PCM trigger proper, insert an opportunity
	 * to reset the frontend's SERDES.
	 */
	mca_fe_early_trigger(substream, cmd, asoc_rtd_to_cpu(rtd, 0));

	return snd_dmaengine_pcm_trigger(substream, cmd);
}
876
877static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
878 struct snd_pcm_substream *substream)
879{
880 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
881
882 if (rtd->dai_link->no_pcm)
883 return -ENOTSUPP;
884
885 return snd_dmaengine_pcm_pointer(substream);
886}
887
888static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl, unsigned int stream)
889{
890 bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
891#ifndef USE_RXB_FOR_CAPTURE
892 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
893 is_tx ? "tx%da" : "rx%da", cl->no);
894#else
895 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
896 is_tx ? "tx%da" : "rx%db", cl->no);
897#endif
898 return of_dma_request_slave_channel(cl->host->dev->of_node, name);
899
900}
901
902static void mca_pcm_free(struct snd_soc_component *component,
903 struct snd_pcm *pcm)
904{
905 struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
906 struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
907 unsigned int i;
908
909 if (rtd->dai_link->no_pcm)
910 return;
911
912 for_each_pcm_streams(i) {
913 struct snd_pcm_substream *substream =
914 rtd->pcm->streams[i].substream;
915
916 if (!substream || !cl->dma_chans[i])
917 continue;
918
919 dma_release_channel(cl->dma_chans[i]);
920 cl->dma_chans[i] = NULL;
921 }
922}
923
924
/*
 * PCM construct: grab a DMA channel per direction and preallocate the
 * managed buffers. Rolls back via mca_pcm_free() on failure.
 */
static int mca_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd)
{
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return 0;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;
		struct dma_chan *chan;

		if (!substream)
			continue;

		chan = mca_request_dma_channel(cl, i);

		if (IS_ERR_OR_NULL(chan)) {
			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
				i, cl->no, chan);
			/* Drop any channel acquired in an earlier iteration */
			mca_pcm_free(component, rtd->pcm);
			return -EINVAL;
		}

		cl->dma_chans[i] = chan;
		snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
					   chan->device->dev, 512 * 1024 * 6,
					   SIZE_MAX);
	}

	return 0;
}
959
/* ASoC component: PCM plumbing is delegated to the dmaengine helpers */
static const struct snd_soc_component_driver mca_component = {
	.name = "apple-mca",
	.open = mca_pcm_open,
	.close = mca_close,
	.hw_params = mca_hw_params,
	.trigger = mca_trigger,
	.pointer = mca_pointer,
	.pcm_construct = mca_pcm_new,
	.pcm_destruct = mca_pcm_free,
};
970
/*
 * Release everything probe acquired. Tolerates partially-initialized
 * state (IS_ERR_OR_NULL checks) so it doubles as the probe error-unwind
 * path; probe NULLs mca->rstc to avoid an unbalanced reset rearm.
 */
static void apple_mca_release(struct mca_data *mca)
{
	int i;

	for (i = 0; i < mca->nclusters; i++) {
		struct mca_cluster *cl = &mca->clusters[i];

		if (!IS_ERR_OR_NULL(cl->clk_parent))
			clk_put(cl->clk_parent);

		if (!IS_ERR_OR_NULL(cl->pd_dev))
			dev_pm_domain_detach(cl->pd_dev, true);
	}

	if (mca->pd_link)
		device_link_del(mca->pd_link);

	if (!IS_ERR_OR_NULL(mca->pd_dev))
		dev_pm_domain_detach(mca->pd_dev, true);

	reset_control_rearm(mca->rstc);
}
993
/*
 * Probe: map the cluster and switch register ranges, attach clocks and
 * power domains, build one FE and one BE DAI per cluster, and register
 * the ASoC component. All failures unwind through apple_mca_release().
 */
static int apple_mca_probe(struct platform_device *pdev)
{
	struct mca_data *mca;
	struct mca_cluster *clusters;
	struct snd_soc_dai_driver *dai_drivers;
	struct resource *res;
	void __iomem *base;
	int nclusters;
	int ret, i;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* The cluster count is inferred from the size of the MMIO range */
	if (resource_size(res) < CLUSTER_STRIDE)
		return -EINVAL;
	nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;

	mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
			   GFP_KERNEL);
	if (!mca)
		return -ENOMEM;
	mca->dev = &pdev->dev;
	mca->nclusters = nclusters;
	mutex_init(&mca->port_mutex);
	platform_set_drvdata(pdev, mca);
	clusters = mca->clusters;

	mca->switch_base =
		devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mca->switch_base))
		return PTR_ERR(mca->switch_base);

	mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(mca->rstc))
		return PTR_ERR(mca->rstc);

	dai_drivers = devm_kzalloc(
		&pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
	if (!dai_drivers)
		return -ENOMEM;

	/* Power domain index 0 is shared; per-cluster PDs follow at i + 1 */
	mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
	if (IS_ERR(mca->pd_dev))
		return -EINVAL;

	mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
					       DL_FLAG_RPM_ACTIVE);
	if (!mca->pd_link) {
		ret = -EINVAL;
		/* Prevent an unbalanced reset rearm */
		mca->rstc = NULL;
		goto err_release;
	}

	reset_control_reset(mca->rstc);

	/*
	 * BEs occupy dai_drivers[0..n-1], FEs dai_drivers[n..2n-1]; the DAI
	 * ids run the other way (FE ids 0..n-1, BE ids n..2n-1), matching
	 * the layout mca_dai_to_cluster() decodes.
	 */
	for (i = 0; i < nclusters; i++) {
		struct mca_cluster *cl = &clusters[i];
		struct snd_soc_dai_driver *fe =
			&dai_drivers[mca->nclusters + i];
		struct snd_soc_dai_driver *be = &dai_drivers[i];

		cl->host = mca;
		cl->no = i;
		cl->base = base + CLUSTER_STRIDE * i;
		cl->port_driver = -1;
		cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
		if (IS_ERR(cl->clk_parent)) {
			dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
				i, PTR_ERR(cl->clk_parent));
			ret = PTR_ERR(cl->clk_parent);
			goto err_release;
		}
		cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
		if (IS_ERR(cl->pd_dev)) {
			dev_err(&pdev->dev,
				"unable to obtain cluster %d PD: %ld\n", i,
				PTR_ERR(cl->pd_dev));
			ret = PTR_ERR(cl->pd_dev);
			goto err_release;
		}

		fe->id = i;
		fe->name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
		if (!fe->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		fe->ops = &mca_fe_ops;
		fe->playback.channels_min = 1;
		fe->playback.channels_max = 32;
		fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
		fe->playback.formats = APPLE_MCA_FMTBITS;
		fe->capture.channels_min = 1;
		fe->capture.channels_max = 32;
		fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
		fe->capture.formats = APPLE_MCA_FMTBITS;
		fe->symmetric_rate = 1;

		fe->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
		fe->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);

		if (!fe->playback.stream_name || !fe->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}

		be->id = i + nclusters;
		be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
		if (!be->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		be->ops = &mca_be_ops;
		be->playback.channels_min = 1;
		be->playback.channels_max = 32;
		be->playback.rates = SNDRV_PCM_RATE_8000_192000;
		be->playback.formats = APPLE_MCA_FMTBITS;
		be->capture.channels_min = 1;
		be->capture.channels_max = 32;
		be->capture.rates = SNDRV_PCM_RATE_8000_192000;
		be->capture.formats = APPLE_MCA_FMTBITS;

		be->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
		be->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
		if (!be->playback.stream_name || !be->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}
	}

	ret = snd_soc_register_component(&pdev->dev, &mca_component,
					 dai_drivers, nclusters * 2);
	if (ret) {
		dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
			ret);
		goto err_release;
	}

	return 0;

err_release:
	apple_mca_release(mca);
	return ret;
}
1146
/* Driver unbind: unregister the ASoC component, then tear down clusters/PDs. */
static int apple_mca_remove(struct platform_device *pdev)
{
	struct mca_data *mca = platform_get_drvdata(pdev);

	snd_soc_unregister_component(&pdev->dev);
	apple_mca_release(mca);
	return 0;
}
1155
/* Devicetree match table (NULL-terminated) */
static const struct of_device_id apple_mca_of_match[] = {
	{ .compatible = "apple,mca", },
	{}
};
MODULE_DEVICE_TABLE(of, apple_mca_of_match);
1161
/* Platform-driver glue; probe/remove defined above */
static struct platform_driver apple_mca_driver = {
	.driver = {
		.name = "apple-mca",
		.of_match_table = apple_mca_of_match,
	},
	.probe = apple_mca_probe,
	.remove = apple_mca_remove,
};
module_platform_driver(apple_mca_driver);
1171
MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
1173MODULE_DESCRIPTION("ASoC Apple MCA driver");
1174MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2//
3// Apple SoCs MCA driver
4//
5// Copyright (C) The Asahi Linux Contributors
6//
7// The MCA peripheral is made up of a number of identical units called clusters.
8// Each cluster has its separate clock parent, SYNC signal generator, carries
9// four SERDES units and has a dedicated I2S port on the SoC's periphery.
10//
11// The clusters can operate independently, or can be combined together in a
12// configurable manner. We mostly treat them as self-contained independent
13// units and don't configure any cross-cluster connections except for the I2S
14// ports. The I2S ports can be routed to any of the clusters (irrespective
15// of their native cluster). We map this onto ASoC's (DPCM) notion of backend
16// and frontend DAIs. The 'cluster guts' are frontends which are dynamically
17// routed to backend I2S ports.
18//
19// DAI references in devicetree are resolved to backends. The routing between
20// frontends and backends is determined by the machine driver in the DAPM paths
21// it supplies.
22
23#include <linux/bitfield.h>
24#include <linux/clk.h>
25#include <linux/dma-mapping.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/of.h>
30#include <linux/of_clk.h>
31#include <linux/of_dma.h>
32#include <linux/platform_device.h>
33#include <linux/pm_domain.h>
34#include <linux/regmap.h>
35#include <linux/reset.h>
36#include <linux/slab.h>
37
38#include <sound/core.h>
39#include <sound/pcm.h>
40#include <sound/pcm_params.h>
41#include <sound/soc.h>
42#include <sound/dmaengine_pcm.h>
43
44#define USE_RXB_FOR_CAPTURE
45
46/* Relative to cluster base */
47#define REG_STATUS 0x0
48#define STATUS_MCLK_EN BIT(0)
49#define REG_MCLK_CONF 0x4
50#define MCLK_CONF_DIV GENMASK(11, 8)
51
52#define REG_SYNCGEN_STATUS 0x100
53#define SYNCGEN_STATUS_EN BIT(0)
54#define REG_SYNCGEN_MCLK_SEL 0x104
55#define SYNCGEN_MCLK_SEL GENMASK(3, 0)
56#define REG_SYNCGEN_HI_PERIOD 0x108
57#define REG_SYNCGEN_LO_PERIOD 0x10c
58
59#define REG_PORT_ENABLES 0x600
60#define PORT_ENABLES_CLOCKS GENMASK(2, 1)
61#define PORT_ENABLES_TX_DATA BIT(3)
62#define REG_PORT_CLOCK_SEL 0x604
63#define PORT_CLOCK_SEL GENMASK(11, 8)
64#define REG_PORT_DATA_SEL 0x608
65#define PORT_DATA_SEL_TXA(cl) (1 << ((cl)*2))
66#define PORT_DATA_SEL_TXB(cl) (2 << ((cl)*2))
67
68#define REG_INTSTATE 0x700
69#define REG_INTMASK 0x704
70
71/* Bases of serdes units (relative to cluster) */
72#define CLUSTER_RXA_OFF 0x200
73#define CLUSTER_TXA_OFF 0x300
74#define CLUSTER_RXB_OFF 0x400
75#define CLUSTER_TXB_OFF 0x500
76
77#define CLUSTER_TX_OFF CLUSTER_TXA_OFF
78
79#ifndef USE_RXB_FOR_CAPTURE
80#define CLUSTER_RX_OFF CLUSTER_RXA_OFF
81#else
82#define CLUSTER_RX_OFF CLUSTER_RXB_OFF
83#endif
84
85/* Relative to serdes unit base */
86#define REG_SERDES_STATUS 0x00
87#define SERDES_STATUS_EN BIT(0)
88#define SERDES_STATUS_RST BIT(1)
89#define REG_TX_SERDES_CONF 0x04
90#define REG_RX_SERDES_CONF 0x08
91#define SERDES_CONF_NCHANS GENMASK(3, 0)
92#define SERDES_CONF_WIDTH_MASK GENMASK(8, 4)
93#define SERDES_CONF_WIDTH_16BIT 0x40
94#define SERDES_CONF_WIDTH_20BIT 0x80
95#define SERDES_CONF_WIDTH_24BIT 0xc0
96#define SERDES_CONF_WIDTH_32BIT 0x100
97#define SERDES_CONF_BCLK_POL 0x400
98#define SERDES_CONF_LSB_FIRST 0x800
99#define SERDES_CONF_UNK1 BIT(12)
100#define SERDES_CONF_UNK2 BIT(13)
101#define SERDES_CONF_UNK3 BIT(14)
102#define SERDES_CONF_NO_DATA_FEEDBACK BIT(15)
103#define SERDES_CONF_SYNC_SEL GENMASK(18, 16)
104#define REG_TX_SERDES_BITSTART 0x08
105#define REG_RX_SERDES_BITSTART 0x0c
106#define REG_TX_SERDES_SLOTMASK 0x0c
107#define REG_RX_SERDES_SLOTMASK 0x10
108#define REG_RX_SERDES_PORT 0x04
109
110/* Relative to switch base */
111#define REG_DMA_ADAPTER_A(cl) (0x8000 * (cl))
112#define REG_DMA_ADAPTER_B(cl) (0x8000 * (cl) + 0x4000)
113#define DMA_ADAPTER_TX_LSB_PAD GENMASK(4, 0)
114#define DMA_ADAPTER_TX_NCHANS GENMASK(6, 5)
115#define DMA_ADAPTER_RX_MSB_PAD GENMASK(12, 8)
116#define DMA_ADAPTER_RX_NCHANS GENMASK(14, 13)
117#define DMA_ADAPTER_NCHANS GENMASK(22, 20)
118
119#define SWITCH_STRIDE 0x8000
120#define CLUSTER_STRIDE 0x4000
121
122#define MAX_NCLUSTERS 6
123
124#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
125 SNDRV_PCM_FMTBIT_S24_LE | \
126 SNDRV_PCM_FMTBIT_S32_LE)
127
/* Per-cluster state; one instance per hardware cluster */
struct mca_cluster {
	int no;			/* index of the cluster within the peripheral */
	__iomem void *base;	/* MMIO base of this cluster's register block */
	struct mca_data *host;
	struct device *pd_dev;	/* this cluster's power domain */
	struct clk *clk_parent;
	struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];

	/* Which directions of the I2S port are currently open (BE side) */
	bool port_started[SNDRV_PCM_STREAM_LAST + 1];
	int port_driver; /* The cluster driving this cluster's port */

	/* Per-direction: BE stream counts as a user of the FE-driven clocks */
	bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
	struct device_link *pd_link;

	/* BCLK/FSYNC ratio set via set_bclk_ratio; 0 means 'not set' */
	unsigned int bclk_ratio;

	/* Masks etc. picked up via the set_tdm_slot method */
	int tdm_slots;
	int tdm_slot_width;
	unsigned int tdm_tx_mask;
	unsigned int tdm_rx_mask;
};
150
/* Driver-global state, one per MCA peripheral instance */
struct mca_data {
	struct device *dev;

	__iomem void *switch_base;	/* MMIO base of the DMA switch region */

	struct device *pd_dev;		/* power domain 0: the peripheral itself */
	struct reset_control *rstc;
	struct device_link *pd_link;

	/* Mutex for accessing port_driver of foreign clusters */
	struct mutex port_mutex;

	int nclusters;
	struct mca_cluster clusters[] __counted_by(nclusters);
};
166
167static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
168{
169 __iomem void *ptr = cl->base + regoffset;
170 u32 newval;
171
172 newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
173 writel_relaxed(newval, ptr);
174}
175
176/*
177 * Get the cluster of FE or BE DAI
178 */
179static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
180{
181 struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
182 /*
183 * FE DAIs are 0 ... nclusters - 1
184 * BE DAIs are nclusters ... 2*nclusters - 1
185 */
186 int cluster_no = dai->id % mca->nclusters;
187
188 return &mca->clusters[cluster_no];
189}
190
/* called before PCM trigger */
static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
				 struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
	int serdes_conf =
		serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/*
		 * Reset the SERDES before the DMA trigger proper. The
		 * SYNC_SEL dance (0 -> 7 -> assert RST -> 0 -> own
		 * syncgen) follows observed hardware behavior; the
		 * meaning of the intermediate selector values is not
		 * fully understood.
		 */
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 7));
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_RST);
		/*
		 * Experiments suggest that it takes at most ~1 us
		 * for the bit to clear, so wait 2 us for good measure.
		 */
		udelay(2);
		WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
			SERDES_STATUS_RST);
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
		break;
	default:
		break;
	}
}
228
229static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
230 struct snd_soc_dai *dai)
231{
232 struct mca_cluster *cl = mca_dai_to_cluster(dai);
233 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
234 int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
235
236 switch (cmd) {
237 case SNDRV_PCM_TRIGGER_START:
238 case SNDRV_PCM_TRIGGER_RESUME:
239 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
240 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
241 SERDES_STATUS_EN | SERDES_STATUS_RST,
242 SERDES_STATUS_EN);
243 break;
244
245 case SNDRV_PCM_TRIGGER_STOP:
246 case SNDRV_PCM_TRIGGER_SUSPEND:
247 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
248 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
249 SERDES_STATUS_EN, 0);
250 break;
251
252 default:
253 return -EINVAL;
254 }
255
256 return 0;
257}
258
/*
 * Power up a FE cluster: enable its clock parent, attach to its power
 * domain, then start the SYNC generator and MCLK output.
 *
 * Balanced by mca_fe_disable_clocks(). Returns 0 or a negative errno.
 */
static int mca_fe_enable_clocks(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	int ret;

	ret = clk_prepare_enable(cl->clk_parent);
	if (ret) {
		dev_err(mca->dev,
			"cluster %d: unable to enable clock parent: %d\n",
			cl->no, ret);
		return ret;
	}

	/*
	 * We can't power up the device earlier than this because
	 * the power state driver would error out on seeing the device
	 * as clock-gated.
	 */
	cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
				      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
					      DL_FLAG_RPM_ACTIVE);
	if (!cl->pd_link) {
		dev_err(mca->dev,
			"cluster %d: unable to prop-up power domain\n", cl->no);
		clk_disable_unprepare(cl->clk_parent);
		return -EINVAL;
	}

	/* MCLK source selector appears to be 1-based (cl->no + 1) */
	writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
		   SYNCGEN_STATUS_EN);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);

	return 0;
}
294
/* Undo mca_fe_enable_clocks(): stop SYNC/MCLK, drop the PD link and clock. */
static void mca_fe_disable_clocks(struct mca_cluster *cl)
{
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);

	device_link_del(cl->pd_link);
	clk_disable_unprepare(cl->clk_parent);
}
303
304static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
305{
306 struct mca_data *mca = cl->host;
307 struct mca_cluster *be_cl;
308 int stream, i;
309
310 mutex_lock(&mca->port_mutex);
311 for (i = 0; i < mca->nclusters; i++) {
312 be_cl = &mca->clusters[i];
313
314 if (be_cl->port_driver != cl->no)
315 continue;
316
317 for_each_pcm_streams(stream) {
318 if (be_cl->clocks_in_use[stream]) {
319 mutex_unlock(&mca->port_mutex);
320 return true;
321 }
322 }
323 }
324 mutex_unlock(&mca->port_mutex);
325 return false;
326}
327
/*
 * BE prepare: ensure the driving FE cluster's clocks are running before
 * the CODEC is unmuted, and record this stream as a clock user.
 */
static int mca_be_prepare(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;
	int ret;

	/* port_driver is set in mca_be_startup(); -1 means no FE attached */
	if (cl->port_driver < 0)
		return -EINVAL;

	fe_cl = &mca->clusters[cl->port_driver];

	/*
	 * Typically the CODECs we are paired with will require clocks
	 * to be present at time of unmute with the 'mute_stream' op
	 * or at time of DAPM widget power-up. We need to enable clocks
	 * here at the latest (frontend prepare would be too late).
	 */
	if (!mca_fe_clocks_in_use(fe_cl)) {
		ret = mca_fe_enable_clocks(fe_cl);
		if (ret < 0)
			return ret;
	}

	cl->clocks_in_use[substream->stream] = true;

	return 0;
}
357
/*
 * BE hw_free: drop this stream's claim on the FE-driven clocks and shut
 * them down once the last user is gone. A no-op if the clocks were
 * never brought up (hw_free can run without a successful prepare).
 */
static int mca_be_hw_free(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;

	if (cl->port_driver < 0)
		return -EINVAL;

	/*
	 * We are operating on a foreign cluster here, but since we
	 * belong to the same PCM, accesses should have been
	 * synchronized at ASoC level.
	 */
	fe_cl = &mca->clusters[cl->port_driver];
	if (!mca_fe_clocks_in_use(fe_cl))
		return 0; /* Nothing to do */

	cl->clocks_in_use[substream->stream] = false;

	/* If that was the last user, power the FE clocks back down */
	if (!mca_fe_clocks_in_use(fe_cl))
		mca_fe_disable_clocks(fe_cl);

	return 0;
}
384
/* Clear the topmost set bits of mask until at most nchans remain set. */
static unsigned int mca_crop_mask(unsigned int mask, int nchans)
{
	unsigned int cropped = mask;

	while (hweight32(cropped) > nchans)
		cropped &= ~(1U << __fls(cropped));

	return cropped;
}
392
/*
 * Program one SERDES unit (TX or RX) of a cluster.
 *
 * @serdes_unit: unit offset within the cluster (CLUSTER_TX_OFF/CLUSTER_RX_OFF)
 * @mask:        TDM slot mask (from set_tdm_slot, or derived in hw_params)
 * @slots:       number of TDM slots on the wire
 * @nchans:      number of PCM channels to (de)serialize
 * @slot_width:  width of one TDM slot in bits (16/20/24/32 only)
 * @port:        I2S port feeding an RX unit (unused for TX)
 *
 * Returns 0 on success, -EINVAL for unsupported slot widths.
 */
static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
				unsigned int mask, int slots, int nchans,
				int slot_width, bool is_tx, int port)
{
	__iomem void *serdes_base = cl->base + serdes_unit;
	u32 serdes_conf, serdes_conf_mask;

	serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
	serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
	switch (slot_width) {
	case 16:
		serdes_conf |= SERDES_CONF_WIDTH_16BIT;
		break;
	case 20:
		serdes_conf |= SERDES_CONF_WIDTH_20BIT;
		break;
	case 24:
		serdes_conf |= SERDES_CONF_WIDTH_24BIT;
		break;
	case 32:
		serdes_conf |= SERDES_CONF_WIDTH_32BIT;
		break;
	default:
		goto err;
	}

	/* Sync to this cluster's own SYNC generator (selector is 1-based) */
	serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
	serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);

	if (is_tx) {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_UNK3;
	} else {
		/* NOTE: UNK3 is deliberately masked but left cleared here */
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3 |
				    SERDES_CONF_NO_DATA_FEEDBACK;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_NO_DATA_FEEDBACK;
	}

	mca_modify(cl,
		   serdes_unit +
			   (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
		   serdes_conf_mask, serdes_conf);

	/*
	 * Slot masks are written inverted (~mask); presumably the
	 * registers are active-low -- exact semantics unconfirmed.
	 */
	if (is_tx) {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
		writel_relaxed(~((u32)mask),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
	} else {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_RX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(1 << port,
			       serdes_base + REG_RX_SERDES_PORT);
	}

	return 0;

err:
	dev_err(cl->host->dev,
		"unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
		mask, slots, slot_width);
	return -EINVAL;
}
466
467static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
468 unsigned int rx_mask, int slots, int slot_width)
469{
470 struct mca_cluster *cl = mca_dai_to_cluster(dai);
471
472 cl->tdm_slots = slots;
473 cl->tdm_slot_width = slot_width;
474 cl->tdm_tx_mask = tx_mask;
475 cl->tdm_rx_mask = rx_mask;
476
477 return 0;
478}
479
/*
 * Set the DAI format of a FE cluster. Only clock/frame provider
 * (BP_FP) operation with I2S or left-justified framing is accepted.
 */
static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	bool fpol_inv = false;
	u32 serdes_conf = 0;
	u32 bitstart;

	if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
	    SND_SOC_DAIFMT_BP_FP)
		goto err;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		fpol_inv = 0;
		bitstart = 1;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		fpol_inv = 1;
		bitstart = 0;
		break;
	default:
		goto err;
	}

	/* Fold any requested frame-clock inversion into the base polarity */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_IF:
	case SND_SOC_DAIFMT_IB_IF:
		fpol_inv ^= 1;
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
	case SND_SOC_DAIFMT_NB_IF:
		serdes_conf |= SERDES_CONF_BCLK_POL;
		break;
	}

	/* Only the inverted resulting frame polarity is representable */
	if (!fpol_inv)
		goto err;

	/* Apply the same polarity/bit-start setup to both directions */
	mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);

	return 0;

err:
	dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
	return -EINVAL;
}
537
538static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
539{
540 struct mca_cluster *cl = mca_dai_to_cluster(dai);
541
542 cl->bclk_ratio = ratio;
543
544 return 0;
545}
546
547static int mca_fe_get_port(struct snd_pcm_substream *substream)
548{
549 struct snd_soc_pcm_runtime *fe = snd_soc_substream_to_rtd(substream);
550 struct snd_soc_pcm_runtime *be;
551 struct snd_soc_dpcm *dpcm;
552
553 be = NULL;
554 for_each_dpcm_be(fe, substream->stream, dpcm) {
555 be = dpcm->be;
556 break;
557 }
558
559 if (!be)
560 return -EINVAL;
561
562 return mca_dai_to_cluster(snd_soc_rtd_to_cpu(be, 0))->no;
563}
564
565static int mca_fe_hw_params(struct snd_pcm_substream *substream,
566 struct snd_pcm_hw_params *params,
567 struct snd_soc_dai *dai)
568{
569 struct mca_cluster *cl = mca_dai_to_cluster(dai);
570 struct mca_data *mca = cl->host;
571 struct device *dev = mca->dev;
572 unsigned int samp_rate = params_rate(params);
573 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
574 bool refine_tdm = false;
575 unsigned long bclk_ratio;
576 unsigned int tdm_slots, tdm_slot_width, tdm_mask;
577 u32 regval, pad;
578 int ret, port, nchans_ceiled;
579
580 if (!cl->tdm_slot_width) {
581 /*
582 * We were not given TDM settings from above, set initial
583 * guesses which will later be refined.
584 */
585 tdm_slot_width = params_width(params);
586 tdm_slots = params_channels(params);
587 refine_tdm = true;
588 } else {
589 tdm_slot_width = cl->tdm_slot_width;
590 tdm_slots = cl->tdm_slots;
591 tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
592 }
593
594 if (cl->bclk_ratio)
595 bclk_ratio = cl->bclk_ratio;
596 else
597 bclk_ratio = tdm_slot_width * tdm_slots;
598
599 if (refine_tdm) {
600 int nchannels = params_channels(params);
601
602 if (nchannels > 2) {
603 dev_err(dev, "missing TDM for stream with two or more channels\n");
604 return -EINVAL;
605 }
606
607 if ((bclk_ratio % nchannels) != 0) {
608 dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
609 bclk_ratio, nchannels);
610 return -EINVAL;
611 }
612
613 tdm_slot_width = bclk_ratio / nchannels;
614
615 if (tdm_slot_width > 32 && nchannels == 1)
616 tdm_slot_width = 32;
617
618 if (tdm_slot_width < params_width(params)) {
619 dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n",
620 tdm_slot_width, params_width(params));
621 return -EINVAL;
622 }
623
624 tdm_mask = (1 << tdm_slots) - 1;
625 }
626
627 port = mca_fe_get_port(substream);
628 if (port < 0)
629 return port;
630
631 ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
632 tdm_mask, tdm_slots, params_channels(params),
633 tdm_slot_width, is_tx, port);
634 if (ret)
635 return ret;
636
637 pad = 32 - params_width(params);
638
639 /*
640 * TODO: Here the register semantics aren't clear.
641 */
642 nchans_ceiled = min_t(int, params_channels(params), 4);
643 regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
644 FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
645 FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
646 FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
647 FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);
648
649#ifndef USE_RXB_FOR_CAPTURE
650 writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
651#else
652 if (is_tx)
653 writel_relaxed(regval,
654 mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
655 else
656 writel_relaxed(regval,
657 mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
658#endif
659
660 if (!mca_fe_clocks_in_use(cl)) {
661 /*
662 * Set up FSYNC duty cycle as even as possible.
663 */
664 writel_relaxed((bclk_ratio / 2) - 1,
665 cl->base + REG_SYNCGEN_HI_PERIOD);
666 writel_relaxed(((bclk_ratio + 1) / 2) - 1,
667 cl->base + REG_SYNCGEN_LO_PERIOD);
668 writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
669 cl->base + REG_MCLK_CONF);
670
671 ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
672 if (ret) {
673 dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
674 cl->no, ret);
675 return ret;
676 }
677 }
678
679 return 0;
680}
681
/* DAI ops for the frontend ('cluster guts') DAIs */
static const struct snd_soc_dai_ops mca_fe_ops = {
	.set_fmt = mca_fe_set_fmt,
	.set_bclk_ratio = mca_set_bclk_ratio,
	.set_tdm_slot = mca_fe_set_tdm_slot,
	.hw_params = mca_fe_hw_params,
	.trigger = mca_fe_trigger,
};
689
690static bool mca_be_started(struct mca_cluster *cl)
691{
692 int stream;
693
694 for_each_pcm_streams(stream)
695 if (cl->port_started[stream])
696 return true;
697 return false;
698}
699
/*
 * BE startup: resolve the single FE driving this BE and route the I2S
 * port (clocks and TX data) to that FE's cluster. Rejects setups where
 * more than one FE would feed the same BE.
 */
static int mca_be_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *be = snd_soc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *fe;
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_cluster *fe_cl;
	struct mca_data *mca = cl->host;
	struct snd_soc_dpcm *dpcm;

	fe = NULL;

	for_each_dpcm_fe(be, substream->stream, dpcm) {
		if (fe && dpcm->fe != fe) {
			dev_err(mca->dev, "many FE per one BE unsupported\n");
			return -EINVAL;
		}

		fe = dpcm->fe;
	}

	if (!fe)
		return -EINVAL;

	fe_cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(fe, 0));

	if (mca_be_started(cl)) {
		/*
		 * Port is already started in the other direction.
		 * Make sure there isn't a conflict with another cluster
		 * driving the port.
		 */
		if (cl->port_driver != fe_cl->no)
			return -EINVAL;

		cl->port_started[substream->stream] = true;
		return 0;
	}

	/* First direction to open: route the port (selectors are 1-based) */
	writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
		       cl->base + REG_PORT_ENABLES);
	writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
		       cl->base + REG_PORT_CLOCK_SEL);
	writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
		       cl->base + REG_PORT_DATA_SEL);
	/* port_mutex guards reads of port_driver from foreign clusters */
	mutex_lock(&mca->port_mutex);
	cl->port_driver = fe_cl->no;
	mutex_unlock(&mca->port_mutex);
	cl->port_started[substream->stream] = true;

	return 0;
}
752
/*
 * BE shutdown: mark the direction stopped; once both directions are
 * down, disable the port and detach it from its driving cluster.
 */
static void mca_be_shutdown(struct snd_pcm_substream *substream,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;

	cl->port_started[substream->stream] = false;

	if (!mca_be_started(cl)) {
		/*
		 * Were we the last direction to shutdown?
		 * Turn off the lights.
		 */
		writel_relaxed(0, cl->base + REG_PORT_ENABLES);
		writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
		mutex_lock(&mca->port_mutex);
		cl->port_driver = -1;
		mutex_unlock(&mca->port_mutex);
	}
}
773
/* DAI ops for the backend I2S-port DAIs */
static const struct snd_soc_dai_ops mca_be_ops = {
	.prepare = mca_be_prepare,
	.hw_free = mca_be_hw_free,
	.startup = mca_be_startup,
	.shutdown = mca_be_shutdown,
};
780
781static int mca_set_runtime_hwparams(struct snd_soc_component *component,
782 struct snd_pcm_substream *substream,
783 struct dma_chan *chan)
784{
785 struct device *dma_dev = chan->device->dev;
786 struct snd_dmaengine_dai_dma_data dma_data = {};
787 int ret;
788
789 struct snd_pcm_hardware hw;
790
791 memset(&hw, 0, sizeof(hw));
792
793 hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
794 SNDRV_PCM_INFO_INTERLEAVED;
795 hw.periods_min = 2;
796 hw.periods_max = UINT_MAX;
797 hw.period_bytes_min = 256;
798 hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
799 hw.buffer_bytes_max = SIZE_MAX;
800 hw.fifo_size = 16;
801
802 ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
803 &hw, chan);
804
805 if (ret)
806 return ret;
807
808 return snd_soc_set_runtime_hwparams(substream, &hw);
809}
810
811static int mca_pcm_open(struct snd_soc_component *component,
812 struct snd_pcm_substream *substream)
813{
814 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
815 struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
816 struct dma_chan *chan = cl->dma_chans[substream->stream];
817 int ret;
818
819 if (rtd->dai_link->no_pcm)
820 return 0;
821
822 ret = mca_set_runtime_hwparams(component, substream, chan);
823 if (ret)
824 return ret;
825
826 return snd_dmaengine_pcm_open(substream, chan);
827}
828
829static int mca_hw_params(struct snd_soc_component *component,
830 struct snd_pcm_substream *substream,
831 struct snd_pcm_hw_params *params)
832{
833 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
834 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
835 struct dma_slave_config slave_config;
836 int ret;
837
838 if (rtd->dai_link->no_pcm)
839 return 0;
840
841 memset(&slave_config, 0, sizeof(slave_config));
842 ret = snd_hwparams_to_dma_slave_config(substream, params,
843 &slave_config);
844 if (ret < 0)
845 return ret;
846
847 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
848 slave_config.dst_port_window_size =
849 min_t(u32, params_channels(params), 4);
850 else
851 slave_config.src_port_window_size =
852 min_t(u32, params_channels(params), 4);
853
854 return dmaengine_slave_config(chan, &slave_config);
855}
856
857static int mca_close(struct snd_soc_component *component,
858 struct snd_pcm_substream *substream)
859{
860 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
861
862 if (rtd->dai_link->no_pcm)
863 return 0;
864
865 return snd_dmaengine_pcm_close(substream);
866}
867
/* Component-level trigger for FE PCMs (BE links are a no-op). */
static int mca_trigger(struct snd_soc_component *component,
		       struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	/*
	 * Before we do the PCM trigger proper, insert an opportunity
	 * to reset the frontend's SERDES.
	 */
	mca_fe_early_trigger(substream, cmd, snd_soc_rtd_to_cpu(rtd, 0));

	return snd_dmaengine_pcm_trigger(substream, cmd);
}
884
885static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
886 struct snd_pcm_substream *substream)
887{
888 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
889
890 if (rtd->dai_link->no_pcm)
891 return -ENOTSUPP;
892
893 return snd_dmaengine_pcm_pointer(substream);
894}
895
/*
 * Look up the dmaengine channel serving one direction of a cluster.
 * Channel names ("tx<N>a", and "rx<N>a" or "rx<N>b" depending on which
 * RX SERDES is compiled in) are resolved against the devicetree.
 */
static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl, unsigned int stream)
{
	bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
#ifndef USE_RXB_FOR_CAPTURE
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%da", cl->no);
#else
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%db", cl->no);
#endif
	return of_dma_request_slave_channel(cl->host->dev->of_node, name);

}
909
910static void mca_pcm_free(struct snd_soc_component *component,
911 struct snd_pcm *pcm)
912{
913 struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
914 struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
915 unsigned int i;
916
917 if (rtd->dai_link->no_pcm)
918 return;
919
920 for_each_pcm_streams(i) {
921 struct snd_pcm_substream *substream =
922 rtd->pcm->streams[i].substream;
923
924 if (!substream || !cl->dma_chans[i])
925 continue;
926
927 dma_release_channel(cl->dma_chans[i]);
928 cl->dma_chans[i] = NULL;
929 }
930}
931
932
/*
 * FE PCM construction: obtain DMA channels for both directions and
 * preallocate managed buffers. On failure, channels acquired so far
 * are released again via mca_pcm_free().
 */
static int mca_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd)
{
	struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return 0;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;
		struct dma_chan *chan;

		if (!substream)
			continue;

		chan = mca_request_dma_channel(cl, i);

		if (IS_ERR_OR_NULL(chan)) {
			mca_pcm_free(component, rtd->pcm);

			/* Stay quiet on probe deferral */
			if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
				return PTR_ERR(chan);

			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
				i, cl->no, chan);

			/* NULL means 'not found' rather than a real errno */
			if (!chan)
				return -EINVAL;
			return PTR_ERR(chan);
		}

		cl->dma_chans[i] = chan;
		snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
					   chan->device->dev, 512 * 1024 * 6,
					   SIZE_MAX);
	}

	return 0;
}
974
/* PCM (platform) side of the component; DAI drivers are built in probe */
static const struct snd_soc_component_driver mca_component = {
	.name = "apple-mca",
	.open = mca_pcm_open,
	.close = mca_close,
	.hw_params = mca_hw_params,
	.trigger = mca_trigger,
	.pointer = mca_pointer,
	.pcm_construct = mca_pcm_new,
	.pcm_destruct = mca_pcm_free,
};
985
/*
 * Undo probe-time setup. Tolerates a partially-initialized mca_data
 * (hence the IS_ERR_OR_NULL checks), so it serves both as the common
 * error path of apple_mca_probe() and as the remove path.
 */
static void apple_mca_release(struct mca_data *mca)
{
	int i;

	for (i = 0; i < mca->nclusters; i++) {
		struct mca_cluster *cl = &mca->clusters[i];

		if (!IS_ERR_OR_NULL(cl->clk_parent))
			clk_put(cl->clk_parent);

		if (!IS_ERR_OR_NULL(cl->pd_dev))
			dev_pm_domain_detach(cl->pd_dev, true);
	}

	if (mca->pd_link)
		device_link_del(mca->pd_link);

	if (!IS_ERR_OR_NULL(mca->pd_dev))
		dev_pm_domain_detach(mca->pd_dev, true);

	/* Balances the reset_control_reset() done in probe */
	reset_control_rearm(mca->rstc);
}
1008
1009static int apple_mca_probe(struct platform_device *pdev)
1010{
1011 struct mca_data *mca;
1012 struct mca_cluster *clusters;
1013 struct snd_soc_dai_driver *dai_drivers;
1014 struct resource *res;
1015 void __iomem *base;
1016 int nclusters;
1017 int ret, i;
1018
1019 base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1020 if (IS_ERR(base))
1021 return PTR_ERR(base);
1022
1023 if (resource_size(res) < CLUSTER_STRIDE)
1024 return -EINVAL;
1025 nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;
1026
1027 mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
1028 GFP_KERNEL);
1029 if (!mca)
1030 return -ENOMEM;
1031 mca->dev = &pdev->dev;
1032 mca->nclusters = nclusters;
1033 mutex_init(&mca->port_mutex);
1034 platform_set_drvdata(pdev, mca);
1035 clusters = mca->clusters;
1036
1037 mca->switch_base =
1038 devm_platform_ioremap_resource(pdev, 1);
1039 if (IS_ERR(mca->switch_base))
1040 return PTR_ERR(mca->switch_base);
1041
1042 mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
1043 if (IS_ERR(mca->rstc))
1044 return PTR_ERR(mca->rstc);
1045
1046 dai_drivers = devm_kzalloc(
1047 &pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
1048 if (!dai_drivers)
1049 return -ENOMEM;
1050
1051 mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
1052 if (IS_ERR(mca->pd_dev))
1053 return -EINVAL;
1054
1055 mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
1056 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
1057 DL_FLAG_RPM_ACTIVE);
1058 if (!mca->pd_link) {
1059 ret = -EINVAL;
1060 /* Prevent an unbalanced reset rearm */
1061 mca->rstc = NULL;
1062 goto err_release;
1063 }
1064
1065 reset_control_reset(mca->rstc);
1066
1067 for (i = 0; i < nclusters; i++) {
1068 struct mca_cluster *cl = &clusters[i];
1069 struct snd_soc_dai_driver *fe =
1070 &dai_drivers[mca->nclusters + i];
1071 struct snd_soc_dai_driver *be = &dai_drivers[i];
1072
1073 cl->host = mca;
1074 cl->no = i;
1075 cl->base = base + CLUSTER_STRIDE * i;
1076 cl->port_driver = -1;
1077 cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
1078 if (IS_ERR(cl->clk_parent)) {
1079 dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
1080 i, PTR_ERR(cl->clk_parent));
1081 ret = PTR_ERR(cl->clk_parent);
1082 goto err_release;
1083 }
1084 cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
1085 if (IS_ERR(cl->pd_dev)) {
1086 dev_err(&pdev->dev,
1087 "unable to obtain cluster %d PD: %ld\n", i,
1088 PTR_ERR(cl->pd_dev));
1089 ret = PTR_ERR(cl->pd_dev);
1090 goto err_release;
1091 }
1092
1093 fe->id = i;
1094 fe->name =
1095 devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
1096 if (!fe->name) {
1097 ret = -ENOMEM;
1098 goto err_release;
1099 }
1100 fe->ops = &mca_fe_ops;
1101 fe->playback.channels_min = 1;
1102 fe->playback.channels_max = 32;
1103 fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
1104 fe->playback.formats = APPLE_MCA_FMTBITS;
1105 fe->capture.channels_min = 1;
1106 fe->capture.channels_max = 32;
1107 fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
1108 fe->capture.formats = APPLE_MCA_FMTBITS;
1109 fe->symmetric_rate = 1;
1110
1111 fe->playback.stream_name =
1112 devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
1113 fe->capture.stream_name =
1114 devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);
1115
1116 if (!fe->playback.stream_name || !fe->capture.stream_name) {
1117 ret = -ENOMEM;
1118 goto err_release;
1119 }
1120
1121 be->id = i + nclusters;
1122 be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
1123 if (!be->name) {
1124 ret = -ENOMEM;
1125 goto err_release;
1126 }
1127 be->ops = &mca_be_ops;
1128 be->playback.channels_min = 1;
1129 be->playback.channels_max = 32;
1130 be->playback.rates = SNDRV_PCM_RATE_8000_192000;
1131 be->playback.formats = APPLE_MCA_FMTBITS;
1132 be->capture.channels_min = 1;
1133 be->capture.channels_max = 32;
1134 be->capture.rates = SNDRV_PCM_RATE_8000_192000;
1135 be->capture.formats = APPLE_MCA_FMTBITS;
1136
1137 be->playback.stream_name =
1138 devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
1139 be->capture.stream_name =
1140 devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
1141 if (!be->playback.stream_name || !be->capture.stream_name) {
1142 ret = -ENOMEM;
1143 goto err_release;
1144 }
1145 }
1146
1147 ret = snd_soc_register_component(&pdev->dev, &mca_component,
1148 dai_drivers, nclusters * 2);
1149 if (ret) {
1150 dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
1151 ret);
1152 goto err_release;
1153 }
1154
1155 return 0;
1156
1157err_release:
1158 apple_mca_release(mca);
1159 return ret;
1160}
1161
1162static void apple_mca_remove(struct platform_device *pdev)
1163{
1164 struct mca_data *mca = platform_get_drvdata(pdev);
1165
1166 snd_soc_unregister_component(&pdev->dev);
1167 apple_mca_release(mca);
1168}
1169
/* Devicetree match table; all supported SoCs use the "apple,mca" compatible. */
static const struct of_device_id apple_mca_of_match[] = {
	{ .compatible = "apple,mca", },
	{}
};
MODULE_DEVICE_TABLE(of, apple_mca_of_match);
1175
static struct platform_driver apple_mca_driver = {
	.driver = {
		.name = "apple-mca",
		.of_match_table = apple_mca_of_match,
	},
	.probe = apple_mca_probe,
	/* .remove_new is the void-returning remove callback variant */
	.remove_new = apple_mca_remove,
};
module_platform_driver(apple_mca_driver);

MODULE_AUTHOR("Martin PoviĊĦer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("ASoC Apple MCA driver");
MODULE_LICENSE("GPL");