// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>

#include <linux/pds/pds_common.h>

#include "core.h"

MODULE_DESCRIPTION(PDSC_DRV_DESCRIPTION);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_LICENSE("GPL");

/* Supported devices */
static const struct pci_device_id pdsc_id_table[] = {
	{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_CORE_PF) },
	{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_VDPA_VF) },
	{ 0, }	/* end of table */
};
MODULE_DEVICE_TABLE(pci, pdsc_id_table);

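/* Watchdog timer callback: re-arm the timer for the next period and
 * queue the health check work on the driver's workqueue.
 */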
static void pdsc_wdtimer_cb(struct timer_list *t)
{
	struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);

	dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
	mod_timer(&pdsc->wdtimer,
		  round_jiffies(jiffies + pdsc->wdtimer_period));

	queue_work(pdsc->wq, &pdsc->health_work);
}

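/* Undo pdsc_map_bars(): clear the cached register pointers and unmap
 * any BARs that were mapped.
 */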
static void pdsc_unmap_bars(struct pdsc *pdsc)
{
	struct pdsc_dev_bar *bars = pdsc->bars;
	unsigned int i;

	pdsc->info_regs = NULL;
	pdsc->cmd_regs = NULL;
	pdsc->intr_status = NULL;
	pdsc->intr_ctrl = NULL;

	for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
		if (bars[i].vaddr)
			pci_iounmap(pdsc->pdev, bars[i].vaddr);
	}
}

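/* Find and map the memory BARs: BAR0 carries the device info/cmd
 * registers and interrupt control, BAR1 carries the doorbell pages.
 */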
static int pdsc_map_bars(struct pdsc *pdsc)
{
	struct pdsc_dev_bar *bar = pdsc->bars;
	struct pci_dev *pdev = pdsc->pdev;
	struct device *dev = pdsc->dev;
	struct pdsc_dev_bar *bars;
	unsigned int i, j;
	int num_bars = 0;
	int err;
	u32 sig;

	bars = pdsc->bars;

	/* Since the PCI interface in the hardware is configurable,
	 * we need to poke into all the bars to find the set we're
	 * expecting.
	 */
	for (i = 0, j = 0; i < PDS_CORE_BARS_MAX; i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;

		bars[j].len = pci_resource_len(pdev, i);
		bars[j].bus_addr = pci_resource_start(pdev, i);
		bars[j].res_index = i;

		/* only map the whole bar 0 */
		if (j > 0) {
			bars[j].vaddr = NULL;
		} else {
			bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
			if (!bars[j].vaddr) {
				dev_err(dev, "Cannot map BAR %d, aborting\n", i);
				return -ENODEV;
			}
		}

		j++;
	}
	num_bars = j;

	/* BAR0: dev_cmd and interrupts */
	if (num_bars < 1) {
		dev_err(dev, "No bars found\n");
		err = -EFAULT;
		goto err_out;
	}

	if (bar->len < PDS_CORE_BAR0_SIZE) {
		dev_err(dev, "Resource bar size %lu too small\n", bar->len);
		err = -EFAULT;
		goto err_out;
	}

	pdsc->info_regs = bar->vaddr + PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET;
	pdsc->cmd_regs = bar->vaddr + PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET;
	pdsc->intr_status = bar->vaddr + PDS_CORE_BAR0_INTR_STATUS_OFFSET;
	pdsc->intr_ctrl = bar->vaddr + PDS_CORE_BAR0_INTR_CTRL_OFFSET;

	sig = ioread32(&pdsc->info_regs->signature);
	if (sig != PDS_CORE_DEV_INFO_SIGNATURE) {
		dev_err(dev, "Incompatible firmware signature %x\n", sig);
		err = -EFAULT;
		goto err_out;
	}

	/* BAR1: doorbells */
	bar++;
	if (num_bars < 2) {
		dev_err(dev, "Doorbell bar missing\n");
		err = -EFAULT;
		goto err_out;
	}

	pdsc->db_pages = bar->vaddr;
	pdsc->phy_db_pages = bar->bus_addr;

	return 0;

err_out:
	pdsc_unmap_bars(pdsc);
	return err;
}

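/* Map a single doorbell page out of the doorbell BAR */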
void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num)
{
	return pci_iomap_range(pdsc->pdev,
			       pdsc->bars[PDS_CORE_PCI_BAR_DBELL].res_index,
			       (u64)page_num << PAGE_SHIFT, PAGE_SIZE);
}

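/* Enable or disable SR-IOV: a non-zero num_vfs allocates the VF state
 * array and enables the VFs, zero disables SR-IOV and frees the state.
 */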
static int pdsc_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct pdsc *pdsc = pci_get_drvdata(pdev);
	struct device *dev = pdsc->dev;
	int ret = 0;

	if (num_vfs > 0) {
		pdsc->vfs = kcalloc(num_vfs, sizeof(struct pdsc_vf),
				    GFP_KERNEL);
		if (!pdsc->vfs)
			return -ENOMEM;
		pdsc->num_vfs = num_vfs;

		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret) {
			dev_err(dev, "Cannot enable SRIOV: %pe\n",
				ERR_PTR(ret));
			goto no_vfs;
		}

		return num_vfs;
	}

no_vfs:
	pci_disable_sriov(pdev);

	kfree(pdsc->vfs);
	pdsc->vfs = NULL;
	pdsc->num_vfs = 0;

	return ret;
}

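/* VF setup: find the PF's private data, register the VF's devlink
 * instance, and hook the VF up to the PF through the auxiliary bus.
 */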
static int pdsc_init_vf(struct pdsc *vf)
{
	struct devlink *dl;
	struct pdsc *pf;
	int err;

	pf = pdsc_get_pf_struct(vf->pdev);
	if (IS_ERR_OR_NULL(pf))
		return PTR_ERR(pf) ?: -1;

	vf->vf_id = pci_iov_vf_id(vf->pdev);

	dl = priv_to_devlink(vf);
	devl_lock(dl);
	devl_register(dl);
	devl_unlock(dl);

	pf->vfs[vf->vf_id].vf = vf;
	err = pdsc_auxbus_dev_add(vf, pf);
	if (err) {
		devl_lock(dl);
		devl_unregister(dl);
		devl_unlock(dl);
	}

	return err;
}

static const struct devlink_health_reporter_ops pdsc_fw_reporter_ops = {
	.name = "fw",
	.diagnose = pdsc_fw_reporter_diagnose,
};

static const struct devlink_param pdsc_dl_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_VNET,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      pdsc_dl_enable_get,
			      pdsc_dl_enable_set,
			      pdsc_dl_enable_validate),
};

#define PDSC_WQ_NAME_LEN 24

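/* PF setup: request and map the BARs, set up the workqueue and watchdog,
 * start the device, then register the devlink params, fw health reporter,
 * and devlink instance.
 */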
static int pdsc_init_pf(struct pdsc *pdsc)
{
	struct devlink_health_reporter *hr;
	char wq_name[PDSC_WQ_NAME_LEN];
	struct devlink *dl;
	int err;

	pcie_print_link_status(pdsc->pdev);

	err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
	if (err) {
		dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
			ERR_PTR(err));
		return err;
	}

	err = pdsc_map_bars(pdsc);
	if (err)
		goto err_out_release_regions;

	/* General workqueue and timer, but don't start timer yet */
	snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
	pdsc->wq = create_singlethread_workqueue(wq_name);
	INIT_WORK(&pdsc->health_work, pdsc_health_thread);
	timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
	pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;

	mutex_init(&pdsc->devcmd_lock);
	mutex_init(&pdsc->config_lock);
	spin_lock_init(&pdsc->adminq_lock);

	mutex_lock(&pdsc->config_lock);
	set_bit(PDSC_S_FW_DEAD, &pdsc->state);

	err = pdsc_setup(pdsc, PDSC_SETUP_INIT);
	if (err) {
		mutex_unlock(&pdsc->config_lock);
		goto err_out_unmap_bars;
	}

	err = pdsc_start(pdsc);
	if (err) {
		mutex_unlock(&pdsc->config_lock);
		goto err_out_teardown;
	}

	mutex_unlock(&pdsc->config_lock);

	dl = priv_to_devlink(pdsc);
	devl_lock(dl);
	err = devl_params_register(dl, pdsc_dl_params,
				   ARRAY_SIZE(pdsc_dl_params));
	if (err) {
		devl_unlock(dl);
		dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
			 ERR_PTR(err));
		goto err_out_stop;
	}

	hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
	if (IS_ERR(hr)) {
		devl_unlock(dl);
		dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
		err = PTR_ERR(hr);
		goto err_out_unreg_params;
	}
	pdsc->fw_reporter = hr;

	devl_register(dl);
	devl_unlock(dl);

	/* Lastly, start the health check timer */
	mod_timer(&pdsc->wdtimer, round_jiffies(jiffies + pdsc->wdtimer_period));

	return 0;

err_out_unreg_params:
	devlink_params_unregister(dl, pdsc_dl_params,
				  ARRAY_SIZE(pdsc_dl_params));
err_out_stop:
	pdsc_stop(pdsc);
err_out_teardown:
	pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
err_out_unmap_bars:
	timer_shutdown_sync(&pdsc->wdtimer);
	if (pdsc->wq)
		destroy_workqueue(pdsc->wq);
	mutex_destroy(&pdsc->config_lock);
	mutex_destroy(&pdsc->devcmd_lock);
	pci_free_irq_vectors(pdsc->pdev);
	pdsc_unmap_bars(pdsc);
err_out_release_regions:
	pci_release_regions(pdsc->pdev);

	return err;
}

static const struct devlink_ops pdsc_dl_ops = {
	.info_get	= pdsc_dl_info_get,
	.flash_update	= pdsc_dl_flash_update,
};

static const struct devlink_ops pdsc_dl_vf_ops = {
};

static DEFINE_IDA(pdsc_ida);

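/* Probe: allocate the devlink/pdsc structure, set up DMA and PCI,
 * then run the PF or VF specific init depending on the function type.
 */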
static int pdsc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	const struct devlink_ops *ops;
	struct devlink *dl;
	struct pdsc *pdsc;
	bool is_pf;
	int err;

	is_pf = !pdev->is_virtfn;
	ops = is_pf ? &pdsc_dl_ops : &pdsc_dl_vf_ops;
	dl = devlink_alloc(ops, sizeof(struct pdsc), dev);
	if (!dl)
		return -ENOMEM;
	pdsc = devlink_priv(dl);

	pdsc->pdev = pdev;
	pdsc->dev = &pdev->dev;
	set_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
	pci_set_drvdata(pdev, pdsc);
	pdsc_debugfs_add_dev(pdsc);

	err = ida_alloc(&pdsc_ida, GFP_KERNEL);
	if (err < 0) {
		dev_err(pdsc->dev, "%s: id alloc failed: %pe\n",
			__func__, ERR_PTR(err));
		goto err_out_free_devlink;
	}
	pdsc->uid = err;

	/* Query system for DMA addressing limitation for the device. */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(PDS_CORE_ADDR_LEN));
	if (err) {
		dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting: %pe\n",
			ERR_PTR(err));
		goto err_out_free_ida;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
		goto err_out_free_ida;
	}
	pci_set_master(pdev);

	if (is_pf)
		err = pdsc_init_pf(pdsc);
	else
		err = pdsc_init_vf(pdsc);
	if (err) {
		dev_err(dev, "Cannot init device: %pe\n", ERR_PTR(err));
		goto err_out_disable_device;
	}

	clear_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
	return 0;

err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_ida:
	ida_free(&pdsc_ida, pdsc->uid);
err_out_free_devlink:
	pdsc_debugfs_del_dev(pdsc);
	devlink_free(dl);

	return err;
}

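/* Remove: unregister from devlink, tear down the PF or VF specific state,
 * then release the PCI and driver resources.
 */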
static void pdsc_remove(struct pci_dev *pdev)
{
	struct pdsc *pdsc = pci_get_drvdata(pdev);
	struct devlink *dl;

	/* Unhook the registrations first to be sure there
	 * are no requests while we're stopping.
	 */
	dl = priv_to_devlink(pdsc);
	devl_lock(dl);
	devl_unregister(dl);
	if (!pdev->is_virtfn) {
		if (pdsc->fw_reporter) {
			devl_health_reporter_destroy(pdsc->fw_reporter);
			pdsc->fw_reporter = NULL;
		}
		devl_params_unregister(dl, pdsc_dl_params,
				       ARRAY_SIZE(pdsc_dl_params));
	}
	devl_unlock(dl);

	if (pdev->is_virtfn) {
		struct pdsc *pf;

		pf = pdsc_get_pf_struct(pdsc->pdev);
		if (!IS_ERR(pf)) {
			pdsc_auxbus_dev_del(pdsc, pf);
			pf->vfs[pdsc->vf_id].vf = NULL;
		}
	} else {
		/* Remove the VFs and their aux_bus connections before other
		 * cleanup so that the clients can use the AdminQ to cleanly
		 * shut themselves down.
		 */
		pdsc_sriov_configure(pdev, 0);

		timer_shutdown_sync(&pdsc->wdtimer);
		if (pdsc->wq)
			destroy_workqueue(pdsc->wq);

		mutex_lock(&pdsc->config_lock);
		set_bit(PDSC_S_STOPPING_DRIVER, &pdsc->state);

		pdsc_stop(pdsc);
		pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
		mutex_unlock(&pdsc->config_lock);
		mutex_destroy(&pdsc->config_lock);
		mutex_destroy(&pdsc->devcmd_lock);

		pdsc_unmap_bars(pdsc);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	ida_free(&pdsc_ida, pdsc->uid);
	pdsc_debugfs_del_dev(pdsc);
	devlink_free(dl);
}

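/* The health watchdog only runs on the PF, so stopping and restarting
 * it are no-ops on a VF.
 */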
static void pdsc_stop_health_thread(struct pdsc *pdsc)
{
	if (pdsc->pdev->is_virtfn)
		return;

	timer_shutdown_sync(&pdsc->wdtimer);
	if (pdsc->health_work.func)
		cancel_work_sync(&pdsc->health_work);
}

static void pdsc_restart_health_thread(struct pdsc *pdsc)
{
	if (pdsc->pdev->is_virtfn)
		return;

	timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
	mod_timer(&pdsc->wdtimer, jiffies + 1);
}

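/* FLR handling: quiesce and unmap before the reset, then re-map and
 * restart once the reset is done.
 */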
void pdsc_reset_prepare(struct pci_dev *pdev)
{
	struct pdsc *pdsc = pci_get_drvdata(pdev);

	pdsc_stop_health_thread(pdsc);
	pdsc_fw_down(pdsc);

	pdsc_unmap_bars(pdsc);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

void pdsc_reset_done(struct pci_dev *pdev)
{
	struct pdsc *pdsc = pci_get_drvdata(pdev);
	struct device *dev = pdsc->dev;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
		return;
	}
	pci_set_master(pdev);

	if (!pdev->is_virtfn) {
		pcie_print_link_status(pdsc->pdev);

		err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
		if (err) {
			dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
				ERR_PTR(err));
			return;
		}

		err = pdsc_map_bars(pdsc);
		if (err)
			return;
	}

	pdsc_fw_up(pdsc);
	pdsc_restart_health_thread(pdsc);
}

static const struct pci_error_handlers pdsc_err_handler = {
	/* FLR handling */
	.reset_prepare      = pdsc_reset_prepare,
	.reset_done         = pdsc_reset_done,
};

static struct pci_driver pdsc_driver = {
	.name = PDS_CORE_DRV_NAME,
	.id_table = pdsc_id_table,
	.probe = pdsc_probe,
	.remove = pdsc_remove,
	.sriov_configure = pdsc_sriov_configure,
	.err_handler = &pdsc_err_handler,
};

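/* Look up the PF's pdsc struct from a VF's pci_dev via the PCI core */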
void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
{
	return pci_iov_get_pf_drvdata(vf_pdev, &pdsc_driver);
}
EXPORT_SYMBOL_GPL(pdsc_get_pf_struct);

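/* Refuse to load if the module name doesn't match the driver name string */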
static int __init pdsc_init_module(void)
{
	if (strcmp(KBUILD_MODNAME, PDS_CORE_DRV_NAME))
		return -EINVAL;

	pdsc_debugfs_create();
	return pci_register_driver(&pdsc_driver);
}

static void __exit pdsc_cleanup_module(void)
{
	pci_unregister_driver(&pdsc_driver);
	pdsc_debugfs_destroy();
}

module_init(pdsc_init_module);
module_exit(pdsc_cleanup_module);