Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2//
  3// Copyright(c) 2021-2022 Intel Corporation
  4//
  5// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
  6//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
  7//
  8// Special thanks to:
  9//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
 10//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
 11//    Filip Proborszcz
 12//
 13// for sharing Intel AudioDSP expertise and helping shape the very
 14// foundation of this driver
 15//
 16
 17#include <linux/acpi.h>
 18#include <linux/module.h>
 19#include <linux/pci.h>
 20#include <acpi/nhlt.h>
 21#include <sound/hda_codec.h>
 22#include <sound/hda_i915.h>
 23#include <sound/hda_register.h>
 24#include <sound/hdaudio.h>
 25#include <sound/hdaudio_ext.h>
 26#include <sound/intel-dsp-config.h>
 
 27#include "../../codecs/hda.h"
 28#include "avs.h"
 29#include "cldma.h"
 30#include "messages.h"
 31#include "pcm.h"
 32
/* PCI PGCTL bits forced when power gating is disabled; see avs_hda_power_gating_enable(). */
static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");

/* PCI CGCTL bits toggled when (un)gating clocks; see avs_hdac_clock_gating_enable(). */
static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");
 40
 41static void
 42avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
 43{
 44	struct pci_dev *pci = to_pci_dev(bus->dev);
 45	u32 data;
 46
 47	pci_read_config_dword(pci, reg, &data);
 48	data &= ~mask;
 49	data |= (value & mask);
 50	pci_write_config_dword(pci, reg, data);
 51}
 52
 53void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
 54{
 55	u32 value = enable ? 0 : pgctl_mask;
 56
 57	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
 58}
 59
 60static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
 61{
 62	u32 value = enable ? cgctl_mask : 0;
 63
 64	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
 65}
 66
 67void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
 68{
 69	avs_hdac_clock_gating_enable(&adev->base.core, enable);
 70}
 71
/*
 * Reference-counted control of the L1SEN bit in the vendor-specific EM2
 * register.
 *
 * The counter starts at 0 with L1SEN set. Each disable request decrements it;
 * the first disabler (0 -> -1) clears the bit. Enable requests increment the
 * counter and the bit is set again only once the last disabler is gone
 * (-1 -> 0, which is exactly when atomic_inc_and_test() returns true).
 */
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	if (enable) {
		if (atomic_inc_and_test(&adev->l1sen_counter))
			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
					      AZX_VS_EM2_L1SEN);
	} else {
		if (atomic_dec_return(&adev->l1sen_counter) == -1)
			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
	}
}
 83
 84static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
 85{
 86	unsigned int cp_streams, pb_streams;
 87	unsigned int gcap;
 88
 89	gcap = snd_hdac_chip_readw(bus, GCAP);
 90	cp_streams = (gcap >> 8) & 0x0F;
 91	pb_streams = (gcap >> 12) & 0x0F;
 92	bus->num_streams = cp_streams + pb_streams;
 93
 94	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
 95	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
 96
 97	return snd_hdac_bus_alloc_stream_pages(bus);
 98}
 99
/*
 * Bring the controller out of reset and reinitialize link state.
 * Returns the result of snd_hdac_bus_init_chip(), i.e. true on success.
 */
static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct hdac_ext_link *hlink;
	bool ret;

	/* Keep dynamic clock gating off for the duration of controller init. */
	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/* Set DUM bit to address incorrect position reporting for capture
	 * streams. In order to do so, CTRL needs to be out of reset state
	 */
	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}
121
/*
 * Verify a codec responds at @addr and register an hda_codec device for it.
 *
 * Returns 0 on success, -EIO when the address does not answer the VENDOR_ID
 * verb, or an error from device init/configuration.
 */
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	/* GET_PARAMETER(VENDOR_ID) verb targeting the root node of @addr. */
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core suspend by forcing suspended state on all
	 * of its codec child devices. Component interested in
	 * dealing with hda codecs directly takes pm responsibilities
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configure effectively creates new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_warn(bus->dev, "failed to config codec #%d: %d\n", addr, ret);
		return ret;
	}

	return 0;
}
160
/*
 * Probe every codec address present in bus->codec_mask. Addresses that fail
 * with anything other than -ENODEV are removed from the mask and the
 * controller is reset to recover the link.
 */
static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
	int ret, c;

	/* First try to probe all given codec slots */
	for (c = 0; c < HDA_MAX_CODECS; c++) {
		if (!(bus->codec_mask & BIT(c)))
			continue;

		ret = probe_codec(bus, c);
		/* Ignore codecs with no supporting driver. */
		if (!ret || ret == -ENODEV)
			continue;

		/*
		 * Some BIOSen give you wrong codec addresses
		 * that don't exist
		 */
		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
		bus->codec_mask &= ~BIT(c);
		/*
		 * More badly, accessing to a non-existing
		 * codec often screws up the controller bus,
		 * and disturbs the further communications.
		 * Thus if an error occurs during probing,
		 * better to reset the controller bus to get
		 * back to the sanity state.
		 */
		snd_hdac_bus_stop_chip(bus);
		avs_hdac_bus_init_chip(bus, true);
	}
}
193
/*
 * Deferred part of PCI probe: initialize the controller, enumerate codecs,
 * boot DSP firmware, register machine boards and finally enable runtime PM.
 * Scheduled from avs_pci_probe() so that probe itself stays fast.
 */
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	/* Display power is needed while codecs behind display links are probed. */
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
	avs_debugfs_init(adev);

	/* Without firmware there is nothing more to set up; bail quietly. */
	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	/* Grab a reference to the NHLT table; released in avs_pci_remove(). */
	acpi_nhlt_get_gbl_table();

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}
231
232static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
233{
234	u64 prev_pos, pos, num_bytes;
235
236	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
237	pos = snd_hdac_stream_get_pos_posbuf(stream);
238
239	if (pos < prev_pos)
240		num_bytes = (buffer_size - prev_pos) +  pos;
241	else
242		num_bytes = pos - prev_pos;
243
244	stream->curr_pos += num_bytes;
245}
246
247/* called from IRQ */
248static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
249{
250	if (stream->substream) {
251		avs_period_elapsed(stream->substream);
252	} else if (stream->cstream) {
253		u64 buffer_size = stream->cstream->runtime->buffer_size;
254
255		hdac_stream_update_pos(stream, buffer_size);
256		snd_compr_fragment_elapsed(stream->cstream);
257	}
258}
259
/*
 * Service stream (IOC/position) interrupts and RIRB responses.
 * Runs from the threaded IRQ handler, with GIE masked by the hard handler.
 */
static irqreturn_t avs_hda_interrupt(struct hdac_bus *bus)
{
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream))
		ret = IRQ_HANDLED;

	/* reg_lock serializes CORB/RIRB access against the command path. */
	spin_lock_irq(&bus->reg_lock);
	/* Clear RIRB interrupt. */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irq(&bus->reg_lock);
	return ret;
}
282
283static irqreturn_t avs_hda_irq_handler(int irq, void *dev_id)
284{
285	struct hdac_bus *bus = dev_id;
286	u32 intsts;
287
288	intsts = snd_hdac_chip_readl(bus, INTSTS);
289	if (intsts == UINT_MAX || !(intsts & AZX_INT_GLOBAL_EN))
290		return IRQ_NONE;
291
292	/* Mask GIE, unmasked in irq_thread(). */
293	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, 0);
294
295	return IRQ_WAKE_THREAD;
 
296}
297
298static irqreturn_t avs_hda_irq_thread(int irq, void *dev_id)
299{
300	struct hdac_bus *bus = dev_id;
301	u32 status;
 
 
 
302
303	status = snd_hdac_chip_readl(bus, INTSTS);
304	if (status & ~AZX_INT_GLOBAL_EN)
305		avs_hda_interrupt(bus);
306
307	/* Unmask GIE, masked in irq_handler(). */
308	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);
309
310	return IRQ_HANDLED;
311}
312
313static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
314{
315	struct avs_dev *adev = dev_id;
316
317	return avs_hda_irq_handler(irq, &adev->base.core);
318}
319
320static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
321{
322	struct avs_dev *adev = dev_id;
323	struct hdac_bus *bus = &adev->base.core;
324	u32 status;
325
326	status = readl(bus->ppcap + AZX_REG_PP_PPSTS);
327	if (status & AZX_PPCTL_PIE)
328		avs_dsp_op(adev, dsp_interrupt);
329
330	/* Unmask GIE, masked in irq_handler(). */
331	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);
332
333	return IRQ_HANDLED;
334}
335
/*
 * Allocate a single MSI/INTx vector and install two threaded handlers on it:
 * one for HDA stream/RIRB traffic (dev_id == bus) and one for DSP IPC
 * (dev_id == adev). On failure, resources acquired so far are released in
 * reverse order via the goto chain.
 */
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, avs_hda_irq_handler, avs_hda_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}
371
/*
 * Initialize the hdac/avs bus state embedded in @adev: core HDA bus, IPC
 * context, module-config scratch buffer and all driver lists/locks.
 * Devres-managed allocations mean no explicit cleanup is needed on failure.
 */
static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
	if (ret < 0)
		return ret;

	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	/* Scratch buffer for module configuration payloads sent over IPC. */
	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!adev->modcfg_buf)
		return -ENOMEM;

	adev->dev = dev;
	/* Platform descriptor selected by the PCI ID table entry. */
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}
417
/*
 * PCI probe: map BARs, set up bus/IPC state, acquire the interrupt and
 * schedule the deferred probe work. Heavy lifting (codec enumeration,
 * firmware boot) happens in avs_hda_probe_work().
 */
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	/* Honor the global DSP-driver selection policy; defer to others if set. */
	ret = snd_intel_dsp_driver_probe(pci);
	switch (ret) {
	case SND_INTEL_DSP_DRIVER_ANY:
	case SND_INTEL_DSP_DRIVER_SST:
	case SND_INTEL_DSP_DRIVER_AVS:
		break;
	default:
		return -ENODEV;
	}

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pci_request_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	/* BAR0: HDA controller registers. */
	bus = &adev->base.core;
	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar0;
	}

	/* BAR4: DSP (ADSP) register space. */
	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	ret = snd_hdac_i915_init(bus);
	if (ret == -EPROBE_DEFER)
		goto err_i915_init;
	else if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	schedule_work(&adev->probe_work);

	return 0;

err_i915_init:
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	pci_clear_master(pci);
	pci_set_drvdata(pci, NULL);
err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
err_remap_bar0:
	pci_release_regions(pci);
	return ret;
}
519
/*
 * Shutdown: quiesce DMA and interrupts so the device is silent across
 * reboot/kexec. Unlike remove, no software teardown beyond IRQ release.
 */
static void avs_pci_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	/* Make sure deferred probing is finished before tearing hardware down. */
	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	snd_hdac_stop_streams(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_link_power_down_all(bus);

	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
}
540
/*
 * Device removal: unwind everything probe and the deferred probe work set up,
 * in reverse order - boards, codecs, streams, links, DSP cores, IRQs,
 * mappings and firmware - then put runtime PM back into a neutral state.
 */
static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	/* Drop the NHLT reference taken in avs_hda_probe_work(). */
	acpi_nhlt_put_gbl_table();
	avs_debugfs_exit(adev);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_ext_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);
	pci_release_regions(pci);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}
598
599static int avs_suspend_standby(struct avs_dev *adev)
600{
601	struct hdac_bus *bus = &adev->base.core;
602	struct pci_dev *pci = adev->base.pci;
603
604	if (bus->cmd_dma_state)
605		snd_hdac_bus_stop_cmd_io(bus);
606
607	snd_hdac_ext_bus_link_power_down_all(bus);
608
609	enable_irq_wake(pci->irq);
610	pci_save_state(pci);
611
612	return 0;
613}
614
/*
 * Full suspend path: power down links, move DSP cores to D3 via IPC, stop
 * the controller and enter link reset. With @low_power set and low-power
 * paths active, only the lightweight standby variant runs instead.
 */
static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	/* Deferred probing must complete before suspending the hardware. */
	flush_work(&adev->probe_work);
	if (low_power && adev->num_lp_paths)
		return avs_suspend_standby(adev);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting controller to reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}
659
660static int avs_resume_standby(struct avs_dev *adev)
661{
662	struct hdac_bus *bus = &adev->base.core;
663	struct pci_dev *pci = adev->base.pci;
664
665	pci_restore_state(pci);
666	disable_irq_wake(pci->irq);
667
668	snd_hdac_ext_bus_link_power_up_all(bus);
669
670	if (bus->cmd_dma_state)
671		snd_hdac_bus_init_cmd_io(bus);
672
673	return 0;
674}
675
/*
 * Full resume path: re-init the controller and reboot DSP firmware.
 * Mirror of avs_suspend_common(); @purge selects a purge (cold) boot.
 */
static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	if (low_power && adev->num_lp_paths)
		return avs_resume_standby(adev);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	return 0;
}
698
699static int __maybe_unused avs_suspend(struct device *dev)
700{
701	return avs_suspend_common(to_avs_dev(dev), true);
702}
703
704static int __maybe_unused avs_resume(struct device *dev)
705{
706	return avs_resume_common(to_avs_dev(dev), true, true);
707}
708
709static int __maybe_unused avs_runtime_suspend(struct device *dev)
710{
711	return avs_suspend_common(to_avs_dev(dev), true);
712}
713
714static int __maybe_unused avs_runtime_resume(struct device *dev)
715{
716	return avs_resume_common(to_avs_dev(dev), true, false);
717}
718
719static int __maybe_unused avs_freeze(struct device *dev)
720{
721	return avs_suspend_common(to_avs_dev(dev), false);
722}
723static int __maybe_unused avs_thaw(struct device *dev)
724{
725	return avs_resume_common(to_avs_dev(dev), false, true);
726}
727
728static int __maybe_unused avs_poweroff(struct device *dev)
729{
730	return avs_suspend_common(to_avs_dev(dev), false);
731}
732
733static int __maybe_unused avs_restore(struct device *dev)
734{
735	return avs_resume_common(to_avs_dev(dev), false, true);
736}
737
/* System-sleep and runtime PM callbacks; runtime ops reuse the suspend path. */
static const struct dev_pm_ops avs_dev_pm = {
	.suspend = avs_suspend,
	.resume = avs_resume,
	.freeze = avs_freeze,
	.thaw = avs_thaw,
	.poweroff = avs_poweroff,
	.restore = avs_restore,
	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};
747
/* Per-generation SRAM window layout; ROM status is read from the base window. */
static const struct avs_sram_spec skl_sram_spec = {
	.base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status_offset = SKL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_sram_spec apl_sram_spec = {
	.base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.window_size = APL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status_offset = APL_ADSP_SRAM_BASE_OFFSET,
};
759
/* Host-IPC register maps: SKL/APL use HIPCI/HIPCT, CNL+ use HIPCIDR/HIPCTDR. */
static const struct avs_hipc_spec skl_hipc_spec = {
	.req_offset = SKL_ADSP_REG_HIPCI,
	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
	.ack_offset = SKL_ADSP_REG_HIPCIE,
	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
	.rsp_offset = SKL_ADSP_REG_HIPCT,
	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
};

static const struct avs_hipc_spec cnl_hipc_spec = {
	.req_offset = CNL_ADSP_REG_HIPCIDR,
	.req_ext_offset = CNL_ADSP_REG_HIPCIDD,
	.req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
	.ack_offset = CNL_ADSP_REG_HIPCIDA,
	.ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
	.rsp_offset = CNL_ADSP_REG_HIPCTDR,
	.rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
	.ctl_offset = CNL_ADSP_REG_HIPCCTL,
};
781
/*
 * Platform descriptors: minimum firmware version, DSP ops and hardware
 * capabilities per generation. Referenced via pci_device_id driver_data.
 */
static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = { 9, 21, 0, 4732 },
	.dsp_ops = &avs_skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram = &skl_sram_spec,
	.hipc = &skl_hipc_spec,
};

static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = { 9, 22, 1, 4323 },
	.dsp_ops = &avs_apl_dsp_ops,
	/* Two cores must be brought up during initialization. */
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &skl_hipc_spec,
};

static const struct avs_spec cnl_desc = {
	.name = "cnl",
	.min_fw_version = { 10, 23, 0, 5314 },
	.dsp_ops = &avs_cnl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

static const struct avs_spec icl_desc = {
	.name = "icl",
	.min_fw_version = { 10, 23, 0, 5040 },
	.dsp_ops = &avs_icl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

static const struct avs_spec jsl_desc = {
	.name = "jsl",
	.min_fw_version = { 10, 26, 0, 5872 },
	.dsp_ops = &avs_icl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};
831
/*
 * TGL-derived platforms differ only in the minimum firmware minor version;
 * generate their descriptors from a single template.
 */
#define AVS_TGL_BASED_SPEC(sname, min)		\
static const struct avs_spec sname##_desc = {	\
	.name = #sname,				\
	.min_fw_version = { 10,	min, 0, 5646 },	\
	.dsp_ops = &avs_tgl_dsp_ops,		\
	.core_init_mask = 1,			\
	.attributes = AVS_PLATATTR_IMR,		\
	.sram = &apl_sram_spec,			\
	.hipc = &cnl_hipc_spec,			\
}

AVS_TGL_BASED_SPEC(lkf, 28);
AVS_TGL_BASED_SPEC(tgl, 29);
AVS_TGL_BASED_SPEC(ehl, 30);
AVS_TGL_BASED_SPEC(adl, 35);
AVS_TGL_BASED_SPEC(adl_n, 35);
848
/* Supported PCI devices; driver_data points at the platform descriptor. */
static const struct pci_device_id avs_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_LP,	&cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_H,	&cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_LP,	&cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_H,	&cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RKL_S,	&cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_LP,	&icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_N,	&icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_H,	&icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_JSL_N,	&jsl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_LKF,	&lkf_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_LP,	&tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_H,	&tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_R,	&tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_0,	&ehl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_3,	&ehl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_S,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_P,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PS,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_M,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PX,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_N,	&adl_n_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_S,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_M,	&adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_PX,	&adl_desc) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);
887
/* PCI driver glue and module metadata. */
static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.shutdown = avs_pci_shutdown,
	.dev_groups = avs_attr_groups,
	.driver = {
		.pm = &avs_dev_pm,
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2//
  3// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
  4//
  5// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
  6//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
  7//
  8// Special thanks to:
  9//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
 10//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
 11//    Filip Proborszcz
 12//
 13// for sharing Intel AudioDSP expertise and helping shape the very
 14// foundation of this driver
 15//
 16
 
 17#include <linux/module.h>
 18#include <linux/pci.h>
 
 19#include <sound/hda_codec.h>
 20#include <sound/hda_i915.h>
 21#include <sound/hda_register.h>
 22#include <sound/hdaudio.h>
 23#include <sound/hdaudio_ext.h>
 24#include <sound/intel-dsp-config.h>
 25#include <sound/intel-nhlt.h>
 26#include "../../codecs/hda.h"
 27#include "avs.h"
 28#include "cldma.h"
 29#include "messages.h"
 
 30
 31static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
 32module_param(pgctl_mask, uint, 0444);
 33MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");
 34
 35static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
 36module_param(cgctl_mask, uint, 0444);
 37MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");
 38
 39static void
 40avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
 41{
 42	struct pci_dev *pci = to_pci_dev(bus->dev);
 43	u32 data;
 44
 45	pci_read_config_dword(pci, reg, &data);
 46	data &= ~mask;
 47	data |= (value & mask);
 48	pci_write_config_dword(pci, reg, data);
 49}
 50
 51void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
 52{
 53	u32 value = enable ? 0 : pgctl_mask;
 54
 55	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
 56}
 57
 58static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
 59{
 60	u32 value = enable ? cgctl_mask : 0;
 61
 62	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
 63}
 64
 65void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
 66{
 67	avs_hdac_clock_gating_enable(&adev->base.core, enable);
 68}
 69
 70void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
 71{
 72	u32 value = enable ? AZX_VS_EM2_L1SEN : 0;
 73
 74	snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
 
 
 
 
 
 75}
 76
 77static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
 78{
 79	unsigned int cp_streams, pb_streams;
 80	unsigned int gcap;
 81
 82	gcap = snd_hdac_chip_readw(bus, GCAP);
 83	cp_streams = (gcap >> 8) & 0x0F;
 84	pb_streams = (gcap >> 12) & 0x0F;
 85	bus->num_streams = cp_streams + pb_streams;
 86
 87	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
 88	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
 89
 90	return snd_hdac_bus_alloc_stream_pages(bus);
 91}
 92
 93static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
 94{
 95	struct hdac_ext_link *hlink;
 96	bool ret;
 97
 98	avs_hdac_clock_gating_enable(bus, false);
 99	ret = snd_hdac_bus_init_chip(bus, full_reset);
100
101	/* Reset stream-to-link mapping */
102	list_for_each_entry(hlink, &bus->hlink_list, list)
103		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
104
105	avs_hdac_clock_gating_enable(bus, true);
106
107	/* Set DUM bit to address incorrect position reporting for capture
108	 * streams. In order to do so, CTRL needs to be out of reset state
109	 */
110	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);
111
112	return ret;
113}
114
/*
 * Probe codec presence at bus address @addr and register a hda_codec
 * device for it.
 * Returns 0 on success, -EIO when nothing responds at @addr, or an error
 * from codec device init/configure.
 */
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	/* GET_PARAMETER(VENDOR_ID) verb addressed to the codec's root node. */
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	/* res left at all-ones means no codec answered at this address. */
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core suspend by forcing suspended state on all
	 * of its codec child devices. Component interested in
	 * dealing with hda codecs directly takes pm responsibilities
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configure effectively creates new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_err(bus->dev, "failed to config codec %d\n", ret);
		return ret;
	}

	return 0;
}
153
154static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
155{
156	int c;
157
158	/* First try to probe all given codec slots */
159	for (c = 0; c < HDA_MAX_CODECS; c++) {
160		if (!(bus->codec_mask & BIT(c)))
161			continue;
162
163		if (!probe_codec(bus, c))
164			/* success, continue probing */
 
165			continue;
166
167		/*
168		 * Some BIOSen give you wrong codec addresses
169		 * that don't exist
170		 */
171		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
172		bus->codec_mask &= ~BIT(c);
173		/*
174		 * More badly, accessing to a non-existing
175		 * codec often screws up the controller bus,
176		 * and disturbs the further communications.
177		 * Thus if an error occurs during probing,
178		 * better to reset the controller bus to get
179		 * back to the sanity state.
180		 */
181		snd_hdac_bus_stop_chip(bus);
182		avs_hdac_bus_init_chip(bus, true);
183	}
184}
185
/*
 * Deferred part of PCI probe, run from adev->probe_work: initialize the
 * controller, probe codecs, boot the DSP base firmware, register machine
 * boards and finally enable runtime PM.
 */
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	/* Hold display power for the duration of codec probing. */
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	/* Without base firmware there is nothing more to set up. */
	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	/* Missing NHLT is not fatal; boards may not need it. */
	adev->nhlt = intel_nhlt_init(adev->dev);
	if (!adev->nhlt)
		dev_info(bus->dev, "platform has no NHLT\n");
	avs_debugfs_init(adev);

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}
225
226static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
227{
228	u64 prev_pos, pos, num_bytes;
229
230	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
231	pos = snd_hdac_stream_get_pos_posbuf(stream);
232
233	if (pos < prev_pos)
234		num_bytes = (buffer_size - prev_pos) +  pos;
235	else
236		num_bytes = pos - prev_pos;
237
238	stream->curr_pos += num_bytes;
239}
240
241/* called from IRQ */
242static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
243{
244	if (stream->substream) {
245		snd_pcm_period_elapsed(stream->substream);
246	} else if (stream->cstream) {
247		u64 buffer_size = stream->cstream->runtime->buffer_size;
248
249		hdac_stream_update_pos(stream, buffer_size);
250		snd_compr_fragment_elapsed(stream->cstream);
251	}
252}
253
/*
 * IRQ top half for HDAudio controller interrupts: acknowledge RIRB
 * responses and, if any stream raised an interrupt, mask stream
 * interrupts and wake hdac_bus_irq_thread() to service them.
 */
static irqreturn_t hdac_bus_irq_handler(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 mask, int_enable;
	u32 status;
	int ret = IRQ_NONE;

	/* Registers are not reliably accessible unless runtime-active. */
	if (!pm_runtime_active(bus->dev))
		return ret;

	spin_lock(&bus->reg_lock);

	/* 0 = not our interrupt; all-ones = invalid read, device likely gone. */
	status = snd_hdac_chip_readl(bus, INTSTS);
	if (status == 0 || status == UINT_MAX) {
		spin_unlock(&bus->reg_lock);
		return ret;
	}

	/* clear rirb int */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
	}

	/* One INTSTS/INTCTL bit per stream. */
	mask = (0x1 << bus->num_streams) - 1;

	status = snd_hdac_chip_readl(bus, INTSTS);
	status &= mask;
	if (status) {
		/* Disable stream interrupts; Re-enable in bottom half */
		int_enable = snd_hdac_chip_readl(bus, INTCTL);
		snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask)));
		ret = IRQ_WAKE_THREAD;
	} else {
		ret = IRQ_HANDLED;
	}

	spin_unlock(&bus->reg_lock);
	return ret;
}
296
/*
 * IRQ bottom half: service the stream interrupts deferred by the top
 * half, then restore stream interrupt generation in INTCTL (it was
 * masked by hdac_bus_irq_handler()).
 */
static irqreturn_t hdac_bus_irq_thread(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 status;
	u32 int_enable;
	u32 mask;
	unsigned long flags;

	status = snd_hdac_chip_readl(bus, INTSTS);

	/* Invokes hdac_update_stream() for each stream flagged in status. */
	snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream);

	/* Re-enable stream interrupts */
	mask = (0x1 << bus->num_streams) - 1;
	spin_lock_irqsave(&bus->reg_lock, flags);
	int_enable = snd_hdac_chip_readl(bus, INTCTL);
	snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask));
	spin_unlock_irqrestore(&bus->reg_lock, flags);

	return IRQ_HANDLED;
}
318
/*
 * Allocate a single interrupt vector (MSI preferred, legacy INTx as
 * fallback) and install both the HDAudio stream handler and the AVS IPC
 * handler on it - both requests target vector 0, so the two handler
 * pairs share one interrupt line (distinguished by their dev_id cookies:
 * @bus vs @adev).
 */
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}
354
/*
 * Initialize the HDAudio bus wrapper and driver-private state in @adev.
 * All allocations are devm-managed, so there is nothing to unwind on
 * failure.
 */
static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
	if (ret < 0)
		return ret;

	/* Use DMA position buffer, no BDL adjustment, synchronous verb I/O. */
	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	/* Scratch buffer for module configuration payloads, mailbox-sized. */
	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!adev->modcfg_buf)
		return -ENOMEM;

	adev->dev = dev;
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}
400
/*
 * PCI probe: map BARs, set up streams and interrupts, then schedule
 * avs_hda_probe_work() which performs the controller/codec/firmware
 * bring-up asynchronously.
 */
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	/* Honor the snd-intel-dspcfg driver-selection policy. */
	ret = snd_intel_dsp_driver_probe(pci);
	if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
		return -ENODEV;

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pci_request_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	/* BAR0: HDAudio controller registers. */
	bus = &adev->base.core;
	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar0;
	}

	/* BAR4: DSP register space. */
	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	/* Prefer 64-bit DMA, fall back to 32-bit addressing. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	/* i915 may not be ready yet: defer on -EPROBE_DEFER, tolerate absence. */
	ret = snd_hdac_i915_init(bus);
	if (ret == -EPROBE_DEFER)
		goto err_i915_init;
	else if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	schedule_work(&adev->probe_work);

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
err_i915_init:
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	pci_clear_master(pci);
	pci_set_drvdata(pci, NULL);
err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
err_remap_bar0:
	pci_release_regions(pci);
	return ret;
}
496
/*
 * Quiesce the hardware on shutdown/reboot: stop streams, silence DSP and
 * controller interrupts, stop the chip and release IRQ resources. Memory
 * mappings are intentionally left in place as the system is going down.
 */
static void avs_pci_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	/* Make sure deferred probing finished and no new IPCs are issued. */
	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	snd_hdac_stop_streams(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_link_power_down_all(bus);

	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* CLDMA platforms hold an extra IRQ cookie for the code loader. */
	if (avs_platattr_test(adev, CLDMA))
		pci_free_irq(pci, 0, &code_loader);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
}
519
/*
 * PCI remove: full teardown in reverse order of probe - unregister
 * boards and codecs, stop the DSP and controller, release streams, IRQs,
 * mappings and firmware, and rebalance the runtime PM counters.
 */
static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	/* Make sure deferred probing finished and no new IPCs are issued. */
	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	avs_debugfs_exit(adev);
	if (adev->nhlt)
		intel_nhlt_free(adev->nhlt);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_ext_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);
	pci_release_regions(pci);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}
578
/*
 * Lightweight suspend used when low-power audio paths are allocated:
 * stop CORB/RIRB DMA if running, power down HDA links and arm the PCI
 * IRQ as a wakeup source. The DSP itself is left untouched here.
 */
static int avs_suspend_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	if (bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);

	snd_hdac_ext_bus_link_power_down_all(bus);

	enable_irq_wake(pci->irq);
	pci_save_state(pci);

	return 0;
}
594
/*
 * Full suspend path: request a low-power (Dx) state from the firmware,
 * disable DSP cores and put the controller into reset. When @low_power
 * is set and low-power paths are allocated, the lighter
 * avs_suspend_standby() flow is taken instead.
 */
static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	/* Deferred probing may still be in flight; let it finish first. */
	flush_work(&adev->probe_work);
	if (low_power && adev->num_lp_paths)
		return avs_suspend_standby(adev);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting controller to reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}
639
/*
 * Counterpart of avs_suspend_standby(): restore PCI state, disarm the
 * wakeup IRQ, power links back up and restart CORB/RIRB DMA if it was
 * active before suspend.
 */
static int avs_resume_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	pci_restore_state(pci);
	disable_irq_wake(pci->irq);

	snd_hdac_ext_bus_link_power_up_all(bus);

	if (bus->cmd_dma_state)
		snd_hdac_bus_init_cmd_io(bus);

	return 0;
}
655
/*
 * Full resume path: reinitialize the controller and reboot DSP firmware,
 * purging the previous image first when @purge is set. When @low_power
 * is set and low-power paths are allocated, the lighter
 * avs_resume_standby() flow is taken instead.
 */
static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	if (low_power && adev->num_lp_paths)
		return avs_resume_standby(adev);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	return 0;
}
678
/* System sleep: low_power=true allows the standby flow when lp paths exist. */
static int __maybe_unused avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

/* System resume: reboot firmware with purge. */
static int __maybe_unused avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, true);
}

/* Runtime suspend: same policy as system sleep. */
static int __maybe_unused avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

/* Runtime resume: reboot firmware without purging (purge=false). */
static int __maybe_unused avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, false);
}

/* Hibernation freeze: full suspend, standby flow not allowed. */
static int __maybe_unused avs_freeze(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}
/* Hibernation thaw: full resume with firmware purge. */
static int __maybe_unused avs_thaw(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

/* Hibernation poweroff: identical to freeze. */
static int __maybe_unused avs_poweroff(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

/* Restore from hibernation image: full resume with firmware purge. */
static int __maybe_unused avs_restore(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}
717
/*
 * System-sleep callbacks are assigned explicitly; runtime PM callbacks
 * come from SET_RUNTIME_PM_OPS(). Hibernation entries deliberately use
 * the non-standby variants (see avs_freeze() and friends).
 */
static const struct dev_pm_ops avs_dev_pm = {
	.suspend = avs_suspend,
	.resume = avs_resume,
	.freeze = avs_freeze,
	.thaw = avs_thaw,
	.poweroff = avs_poweroff,
	.restore = avs_restore,
	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};
727
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * SkyLake-generation platform descriptor; flagged with AVS_PLATATTR_CLDMA,
 * i.e. code is loaded through the CLDMA engine (see code_loader usage in
 * avs_pci_shutdown()/avs_pci_remove()).
 */
static const struct avs_spec skl_desc = {
	.name = "skl",
	/* Oldest base-firmware version the driver accepts. */
	.min_fw_version = {
		.major = 9,
		.minor = 21,
		.hotfix = 0,
		.build = 4732,
	},
	.dsp_ops = &skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = SKL_ADSP_SRAM_BASE_OFFSET,
};
743
/*
 * ApolloLake-generation platform descriptor; flagged with AVS_PLATATTR_IMR
 * (IMR-based firmware handling - see avs.h) and initializes two cores.
 */
static const struct avs_spec apl_desc = {
	.name = "apl",
	/* Oldest base-firmware version the driver accepts. */
	.min_fw_version = {
		.major = 9,
		.minor = 22,
		.hotfix = 1,
		.build = 4323,
	},
	.dsp_ops = &apl_dsp_ops,
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = APL_ADSP_SRAM_BASE_OFFSET,
};
759
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Supported controllers: SKL/KBL/CML-S devices pair with skl_desc,
 * APL/GML with apl_desc (selected via id->driver_data in avs_bus_init()).
 */
static const struct pci_device_id avs_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);
772
/* PCI driver glue; power management is routed through avs_dev_pm. */
static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.shutdown = avs_pci_shutdown,
	.driver = {
		.pm = &avs_dev_pm,
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");