drivers/fpga/dfl-pci.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) PCIe device
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Zhang Yi <Yi.Z.Zhang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/aer.h>

#include "dfl.h"

#define DRV_VERSION	"0.8"
#define DRV_NAME	"dfl-pci"

#define PCI_VSEC_ID_INTEL_DFLS 0x43

#define PCI_VNDR_DFLS_CNT 0x8
#define PCI_VNDR_DFLS_RES 0xc

#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)

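/*
 * Layout of the Intel DFL vendor-specific capability, as consumed by
 * find_dfls_by_vsec() below (informational sketch; offsets are relative
 * to the start of the VSEC):
 *
 *   +0x0  PCI_VNDR_HEADER     vendor header, ID must be PCI_VSEC_ID_INTEL_DFLS
 *   +0x8  PCI_VNDR_DFLS_CNT   number of DFL resource entries
 *   +0xc  PCI_VNDR_DFLS_RES   first of <count> 32-bit resource entries
 *
 * Each resource dword encodes the BAR in bits [2:0] and the byte offset of
 * the DFL within that BAR in bits [31:3]. For example, a hypothetical value
 * of 0x00100002 would place a DFL at offset 0x100000 of BAR 2:
 *
 *   bir    = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;   // -> 2
 *   offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;   // -> 0x100000
 */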
struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};

static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
{
	if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
		return NULL;

	return pcim_iomap_table(pcidev)[0];
}

static int cci_pci_alloc_irq(struct pci_dev *pcidev)
{
	int ret, nvec = pci_msix_vec_count(pcidev);

	if (nvec <= 0) {
		dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
		return 0;
	}

	ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	return nvec;
}

static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}

/* PCI Device ID */
#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
#define PCIE_DEVICE_ID_SILICOM_PAC_N5010	0x1000
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011	0x1001
#define PCIE_DEVICE_ID_INTEL_DFL		0xbcce
/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
#define PCIE_SUBDEVICE_ID_INTEL_N6000		0x1770
#define PCIE_SUBDEVICE_ID_INTEL_N6001		0x1771
#define PCIE_SUBDEVICE_ID_INTEL_C6100		0x17d4

/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
#define PCIE_DEVICE_ID_INTEL_DFL_VF		0xbccf

static struct pci_device_id cci_pcie_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);

static int cci_init_drvdata(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata;

	drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	pci_set_drvdata(pcidev, drvdata);

	return 0;
}

static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all children feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
	cci_pci_free_irq(pcidev);
}

static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
{
	unsigned int i;
	int *table;

	table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
	if (!table)
		return table;

	for (i = 0; i < nvec; i++)
		table[i] = pci_irq_vector(pcidev, i);

	return table;
}

static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
	u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
	int dfl_res_off, i, bars, voff = 0;
	resource_size_t start, len;

	while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
		vndr_hdr = 0;
		pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);

		if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
		    pcidev->vendor == PCI_VENDOR_ID_INTEL)
			break;
	}

	if (!voff) {
		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
		return -ENODEV;
	}

	dfl_cnt = 0;
	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
	if (dfl_cnt > PCI_STD_NUM_BARS) {
		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
			__func__, dfl_cnt, PCI_STD_NUM_BARS);
		return -EINVAL;
	}

	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
			__func__);
		return -EINVAL;
	}

	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
		dfl_res = GENMASK(31, 0);
		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
		if (bir >= PCI_STD_NUM_BARS) {
			dev_err(&pcidev->dev, "%s bad bir number %d\n",
				__func__, bir);
			return -EINVAL;
		}

		if (bars & BIT(bir)) {
			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
				__func__, bir);
			return -EINVAL;
		}

		bars |= BIT(bir);

		len = pci_resource_len(pcidev, bir);
		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
		if (offset >= len) {
			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
				__func__, offset, &len);
			return -EINVAL;
		}

		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);

		len -= offset;

		start = pci_resource_start(pcidev, bir) + offset;

		dfl_fpga_enum_info_add_dfl(info, start, len);
	}

	return 0;
}

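/*
 * Sketch of the register layout consulted below (definitions come from
 * dfl.h): on a PF, BAR 0 starts with the FME Device Feature Header; the
 * FME capability register (FME_HDR_CAP) reports the number of ports, and
 * each FME_HDR_PORT_OFST(i) register supplies the implemented flag, BAR id
 * and DFH offset of port i. On a VF, BAR 0 starts directly with a Port DFH.
 */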
/* default method of finding dfls starting at offset 0 of bar 0 */
static int find_dfls_by_default(struct pci_dev *pcidev,
				struct dfl_fpga_enum_info *info)
{
	int port_num, bar, i, ret = 0;
	resource_size_t start, len;
	void __iomem *base;
	u32 offset;
	u64 v;

	/* start to find Device Feature List from Bar 0 */
	base = cci_pci_ioremap_bar0(pcidev);
	if (!base)
		return -ENOMEM;

	/*
	 * PF device has FME and Ports/AFUs, and VF device only has one
	 * Port/AFU. Check them and add related "Device Feature List" info
	 * for the next step enumeration.
	 */
	if (dfl_feature_is_fme(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);

		/*
		 * find more Device Feature Lists (e.g. Ports) per information
		 * indicated by FME module.
		 */
		v = readq(base + FME_HDR_CAP);
		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);

		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);

		for (i = 0; i < port_num; i++) {
			v = readq(base + FME_HDR_PORT_OFST(i));

			/* skip ports which are not implemented. */
			if (!(v & FME_PORT_OFST_IMP))
				continue;

			/*
			 * add Port's Device Feature List information for next
			 * step enumeration.
			 */
			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
			if (bar == FME_PORT_OFST_BAR_SKIP) {
				continue;
			} else if (bar >= PCI_STD_NUM_BARS) {
				dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
					bar, i);
				ret = -EINVAL;
				break;
			}

			start = pci_resource_start(pcidev, bar) + offset;
			len = pci_resource_len(pcidev, bar) - offset;

			dfl_fpga_enum_info_add_dfl(info, start, len);
		}
	} else if (dfl_feature_is_port(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);
	} else {
		ret = -ENODEV;
	}

	/* release I/O mappings for next step enumeration */
	pcim_iounmap_regions(pcidev, BIT(0));

	return ret;
}

/* enumerate feature devices under pci device */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);
		if (ret)
			goto irq_free_exit;
	}

	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

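	/*
	 * On failure, release the MSI-X vectors allocated above; on success
	 * they stay allocated until cci_remove_feature_devs() frees them at
	 * remove time. The enumeration info is always freed.
	 */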
irq_free_exit:
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}

static
int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
{
	int ret;

	ret = pcim_enable_device(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
		return ret;
	}

	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret && ret != -EINVAL)
		dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);

	pci_set_master(pcidev);

	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pcidev->dev, "No suitable DMA support available.\n");
		goto disable_error_report_exit;
	}

	ret = cci_init_drvdata(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
		goto disable_error_report_exit;
	}

	ret = cci_enumerate_feature_devs(pcidev);
	if (!ret)
		return ret;

	dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);

disable_error_report_exit:
	pci_disable_pcie_error_reporting(pcidev);
	return ret;
}

static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_cdev *cdev = drvdata->cdev;

	if (!num_vfs) {
		/*
		 * disable SRIOV and then put released ports back to default
		 * PF access mode.
		 */
		pci_disable_sriov(pcidev);

		dfl_fpga_cdev_config_ports_pf(cdev);

	} else {
		int ret;

		/*
		 * before enable SRIOV, put released ports into VF access mode
		 * first of all.
		 */
		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
		if (ret)
			return ret;

		ret = pci_enable_sriov(pcidev, num_vfs);
		if (ret) {
			dfl_fpga_cdev_config_ports_pf(cdev);
			return ret;
		}
	}

	return num_vfs;
}

static void cci_pci_remove(struct pci_dev *pcidev)
{
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
	pci_disable_pcie_error_reporting(pcidev);
}

static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
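
Once this driver binds and the DFL framework enumerates the feature devices, they appear as dfl-fme.N and dfl-port.N devices with matching character device nodes. A minimal user-space sketch follows, assuming the standard DFL char-device layout; the device path is illustrative and the ioctl comes from the uapi header <linux/fpga-dfl.h>:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	/* FME char device for the first enumerated PF (path is illustrative) */
	int fd = open("/dev/dfl-fme.0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* query the DFL FME ioctl API version supported by the kernel */
	printf("DFL FME API version: %d\n", ioctl(fd, DFL_FPGA_GET_API_VERSION));

	close(fd);
	return 0;
}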
drivers/fpga/dfl-pci.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) PCIe device
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Zhang Yi <Yi.Z.Zhang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/pci.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/aer.h>

#include "dfl.h"

#define DRV_VERSION	"0.8"
#define DRV_NAME	"dfl-pci"

#define PCI_VSEC_ID_INTEL_DFLS 0x43

#define PCI_VNDR_DFLS_CNT 0x8
#define PCI_VNDR_DFLS_RES 0xc

#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)

struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};

static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
{
	if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
		return NULL;

	return pcim_iomap_table(pcidev)[0];
}

static int cci_pci_alloc_irq(struct pci_dev *pcidev)
{
	int ret, nvec = pci_msix_vec_count(pcidev);

	if (nvec <= 0) {
		dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
		return 0;
	}

	ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	return nvec;
}

static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}

/* PCI Device ID */
#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C

static struct pci_device_id cci_pcie_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);

static int cci_init_drvdata(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata;

	drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	pci_set_drvdata(pcidev, drvdata);

	return 0;
}

static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all children feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
	cci_pci_free_irq(pcidev);
}

static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
{
	unsigned int i;
	int *table;

	table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
	if (!table)
		return table;

	for (i = 0; i < nvec; i++)
		table[i] = pci_irq_vector(pcidev, i);

	return table;
}

static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
	u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
	int dfl_res_off, i, bars, voff = 0;
	resource_size_t start, len;

	while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
		vndr_hdr = 0;
		pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);

		if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
		    pcidev->vendor == PCI_VENDOR_ID_INTEL)
			break;
	}

	if (!voff) {
		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
		return -ENODEV;
	}

	dfl_cnt = 0;
	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
	if (dfl_cnt > PCI_STD_NUM_BARS) {
		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
			__func__, dfl_cnt, PCI_STD_NUM_BARS);
		return -EINVAL;
	}

	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
			__func__);
		return -EINVAL;
	}

	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
		dfl_res = GENMASK(31, 0);
		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
		if (bir >= PCI_STD_NUM_BARS) {
			dev_err(&pcidev->dev, "%s bad bir number %d\n",
				__func__, bir);
			return -EINVAL;
		}

		if (bars & BIT(bir)) {
			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
				__func__, bir);
			return -EINVAL;
		}

		bars |= BIT(bir);

		len = pci_resource_len(pcidev, bir);
		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
		if (offset >= len) {
			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
				__func__, offset, &len);
			return -EINVAL;
		}

		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);

		len -= offset;

		start = pci_resource_start(pcidev, bir) + offset;

		dfl_fpga_enum_info_add_dfl(info, start, len);
	}

	return 0;
}

/* default method of finding dfls starting at offset 0 of bar 0 */
static int find_dfls_by_default(struct pci_dev *pcidev,
				struct dfl_fpga_enum_info *info)
{
	int port_num, bar, i, ret = 0;
	resource_size_t start, len;
	void __iomem *base;
	u32 offset;
	u64 v;

	/* start to find Device Feature List from Bar 0 */
	base = cci_pci_ioremap_bar0(pcidev);
	if (!base)
		return -ENOMEM;

	/*
	 * PF device has FME and Ports/AFUs, and VF device only has one
	 * Port/AFU. Check them and add related "Device Feature List" info
	 * for the next step enumeration.
	 */
	if (dfl_feature_is_fme(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);

		/*
		 * find more Device Feature Lists (e.g. Ports) per information
		 * indicated by FME module.
		 */
		v = readq(base + FME_HDR_CAP);
		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);

		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);

		for (i = 0; i < port_num; i++) {
			v = readq(base + FME_HDR_PORT_OFST(i));

			/* skip ports which are not implemented. */
			if (!(v & FME_PORT_OFST_IMP))
				continue;

			/*
			 * add Port's Device Feature List information for next
			 * step enumeration.
			 */
			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
			start = pci_resource_start(pcidev, bar) + offset;
			len = pci_resource_len(pcidev, bar) - offset;

			dfl_fpga_enum_info_add_dfl(info, start, len);
		}
	} else if (dfl_feature_is_port(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);
	} else {
		ret = -ENODEV;
	}

	/* release I/O mappings for next step enumeration */
	pcim_iounmap_regions(pcidev, BIT(0));

	return ret;
}

/* enumerate feature devices under pci device */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);
		if (ret)
			goto irq_free_exit;
	}

	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}

static
int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
{
	int ret;

	ret = pcim_enable_device(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
		return ret;
	}

	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret && ret != -EINVAL)
		dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);

	pci_set_master(pcidev);

	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret)
			goto disable_error_report_exit;
	} else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret)
			goto disable_error_report_exit;
	} else {
		ret = -EIO;
		dev_err(&pcidev->dev, "No suitable DMA support available.\n");
		goto disable_error_report_exit;
	}

	ret = cci_init_drvdata(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
		goto disable_error_report_exit;
	}

	ret = cci_enumerate_feature_devs(pcidev);
	if (!ret)
		return ret;

	dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);

disable_error_report_exit:
	pci_disable_pcie_error_reporting(pcidev);
	return ret;
}

static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_cdev *cdev = drvdata->cdev;

	if (!num_vfs) {
		/*
		 * disable SRIOV and then put released ports back to default
		 * PF access mode.
		 */
		pci_disable_sriov(pcidev);

		dfl_fpga_cdev_config_ports_pf(cdev);

	} else {
		int ret;

		/*
		 * before enable SRIOV, put released ports into VF access mode
		 * first of all.
		 */
		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
		if (ret)
			return ret;

		ret = pci_enable_sriov(pcidev, num_vfs);
		if (ret) {
			dfl_fpga_cdev_config_ports_pf(cdev);
			return ret;
		}
	}

	return num_vfs;
}

static void cci_pci_remove(struct pci_dev *pcidev)
{
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
	pci_disable_pcie_error_reporting(pcidev);
}

static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
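
Compared with the v6.2 listing above, the v5.14.15 version differs mainly in its shorter device-ID table and in the DMA setup: it still calls the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() pair for each mask width. Later kernels collapse the two-branch structure because dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, roughly as in this sketch (the same pattern used in the v6.2 probe above):

	/* try 64-bit DMA first, fall back to 32-bit if the platform cannot do it */
	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));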