/*
 * Listing of drivers/fpga/dfl-pci.c.
 * Two upstream revisions follow: Linux v5.14.15 first, then Linux v6.9.4.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for FPGA Device Feature List (DFL) PCIe device
  4 *
  5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6 *
  7 * Authors:
  8 *   Zhang Yi <Yi.Z.Zhang@intel.com>
  9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 10 *   Joseph Grecco <joe.grecco@intel.com>
 11 *   Enno Luebbers <enno.luebbers@intel.com>
 12 *   Tim Whisonant <tim.whisonant@intel.com>
 13 *   Ananda Ravuri <ananda.ravuri@intel.com>
 14 *   Henry Mitchel <henry.mitchel@intel.com>
 15 */
 16
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/aer.h>

#include "dfl.h"
 26
/* module identification */
#define DRV_VERSION	"0.8"
#define DRV_NAME	"dfl-pci"

/* VSEC ID used by Intel for the Device Feature List capability */
#define PCI_VSEC_ID_INTEL_DFLS 0x43

/* offsets within the DFL VSEC: entry count and resource-entry array */
#define PCI_VNDR_DFLS_CNT 0x8
#define PCI_VNDR_DFLS_RES 0xc

/* each resource entry dword encodes a BAR number and a byte offset */
#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
 37
/* per-device driver state, attached via pci_set_drvdata() */
struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};
 41
 42static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
 43{
 44	if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
 45		return NULL;
 46
 47	return pcim_iomap_table(pcidev)[0];
 48}
 49
 50static int cci_pci_alloc_irq(struct pci_dev *pcidev)
 51{
 52	int ret, nvec = pci_msix_vec_count(pcidev);
 53
 54	if (nvec <= 0) {
 55		dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
 56		return 0;
 57	}
 58
 59	ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
 60	if (ret < 0)
 61		return ret;
 62
 63	return nvec;
 64}
 65
/* Release the MSI-X vectors obtained by cci_pci_alloc_irq(). */
static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}
 70
/* PCI Device ID (physical functions) */
#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B

/* VF Device (SR-IOV virtual-function counterparts of the PFs above) */
#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
 
 82
 83static struct pci_device_id cci_pcie_id_tbl[] = {
 84	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
 85	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
 86	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
 87	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
 88	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
 89	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
 90	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
 91	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
 92	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 93	{0,}
 94};
 95MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
 96
 97static int cci_init_drvdata(struct pci_dev *pcidev)
 98{
 99	struct cci_drvdata *drvdata;
100
101	drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
102	if (!drvdata)
103		return -ENOMEM;
104
105	pci_set_drvdata(pcidev, drvdata);
106
107	return 0;
108}
109
/* Tear down everything set up by cci_enumerate_feature_devs(). */
static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all children feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
	/* only free vectors once no child device can still use them */
	cci_pci_free_irq(pcidev);
}
118
119static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
120{
121	unsigned int i;
122	int *table;
123
124	table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
125	if (!table)
126		return table;
127
128	for (i = 0; i < nvec; i++)
129		table[i] = pci_irq_vector(pcidev, i);
130
131	return table;
132}
133
134static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
135{
136	u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
137	int dfl_res_off, i, bars, voff = 0;
138	resource_size_t start, len;
139
140	while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
141		vndr_hdr = 0;
142		pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);
143
144		if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
145		    pcidev->vendor == PCI_VENDOR_ID_INTEL)
146			break;
147	}
148
149	if (!voff) {
150		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
151		return -ENODEV;
152	}
153
154	dfl_cnt = 0;
155	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
156	if (dfl_cnt > PCI_STD_NUM_BARS) {
157		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
158			__func__, dfl_cnt, PCI_STD_NUM_BARS);
159		return -EINVAL;
160	}
161
162	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
163	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
164		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
165			__func__);
166		return -EINVAL;
167	}
168
169	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
170		dfl_res = GENMASK(31, 0);
171		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);
172
173		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
174		if (bir >= PCI_STD_NUM_BARS) {
175			dev_err(&pcidev->dev, "%s bad bir number %d\n",
176				__func__, bir);
177			return -EINVAL;
178		}
179
180		if (bars & BIT(bir)) {
181			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
182				__func__, bir);
183			return -EINVAL;
184		}
185
186		bars |= BIT(bir);
187
188		len = pci_resource_len(pcidev, bir);
189		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
190		if (offset >= len) {
191			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
192				__func__, offset, &len);
193			return -EINVAL;
194		}
195
196		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);
197
198		len -= offset;
199
200		start = pci_resource_start(pcidev, bir) + offset;
201
202		dfl_fpga_enum_info_add_dfl(info, start, len);
203	}
204
205	return 0;
206}
207
208/* default method of finding dfls starting at offset 0 of bar 0 */
209static int find_dfls_by_default(struct pci_dev *pcidev,
210				struct dfl_fpga_enum_info *info)
211{
212	int port_num, bar, i, ret = 0;
213	resource_size_t start, len;
214	void __iomem *base;
215	u32 offset;
216	u64 v;
217
218	/* start to find Device Feature List from Bar 0 */
219	base = cci_pci_ioremap_bar0(pcidev);
220	if (!base)
221		return -ENOMEM;
222
223	/*
224	 * PF device has FME and Ports/AFUs, and VF device only has one
225	 * Port/AFU. Check them and add related "Device Feature List" info
226	 * for the next step enumeration.
227	 */
228	if (dfl_feature_is_fme(base)) {
229		start = pci_resource_start(pcidev, 0);
230		len = pci_resource_len(pcidev, 0);
231
232		dfl_fpga_enum_info_add_dfl(info, start, len);
233
234		/*
235		 * find more Device Feature Lists (e.g. Ports) per information
236		 * indicated by FME module.
237		 */
238		v = readq(base + FME_HDR_CAP);
239		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
240
241		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
242
243		for (i = 0; i < port_num; i++) {
244			v = readq(base + FME_HDR_PORT_OFST(i));
245
246			/* skip ports which are not implemented. */
247			if (!(v & FME_PORT_OFST_IMP))
248				continue;
249
250			/*
251			 * add Port's Device Feature List information for next
252			 * step enumeration.
253			 */
254			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
255			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
 
 
 
 
 
 
 
 
 
256			start = pci_resource_start(pcidev, bar) + offset;
257			len = pci_resource_len(pcidev, bar) - offset;
258
259			dfl_fpga_enum_info_add_dfl(info, start, len);
260		}
261	} else if (dfl_feature_is_port(base)) {
262		start = pci_resource_start(pcidev, 0);
263		len = pci_resource_len(pcidev, 0);
264
265		dfl_fpga_enum_info_add_dfl(info, start, len);
266	} else {
267		ret = -ENODEV;
268	}
269
270	/* release I/O mappings for next step enumeration */
271	pcim_iounmap_regions(pcidev, BIT(0));
272
273	return ret;
274}
275
/*
 * Enumerate all DFL-based feature devices below this PCI device.
 *
 * Collects enumeration information (MSI-X vectors plus the location of
 * each Device Feature List, found via the DFL VSEC or by the default
 * BAR 0 scan) and passes it to the DFL core, which creates the child
 * feature devices.  The resulting container device is stored in
 * drvdata->cdev.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);	/* not needed once handed to info */
		if (ret)
			goto irq_free_exit;
	}

	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)	/* no DFL VSEC: fall back to BAR 0 layout */
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	/* any failure past IRQ allocation returns the vectors */
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}
334
335static
336int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
337{
338	int ret;
339
340	ret = pcim_enable_device(pcidev);
341	if (ret < 0) {
342		dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
343		return ret;
344	}
345
346	ret = pci_enable_pcie_error_reporting(pcidev);
347	if (ret && ret != -EINVAL)
348		dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
349
350	pci_set_master(pcidev);
351
352	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
353		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
354		if (ret)
355			goto disable_error_report_exit;
356	} else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
357		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
358		if (ret)
359			goto disable_error_report_exit;
360	} else {
361		ret = -EIO;
362		dev_err(&pcidev->dev, "No suitable DMA support available.\n");
363		goto disable_error_report_exit;
364	}
365
366	ret = cci_init_drvdata(pcidev);
367	if (ret) {
368		dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
369		goto disable_error_report_exit;
370	}
371
372	ret = cci_enumerate_feature_devs(pcidev);
373	if (!ret)
 
374		return ret;
 
375
376	dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
377
378disable_error_report_exit:
379	pci_disable_pcie_error_reporting(pcidev);
380	return ret;
381}
382
383static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
384{
385	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
386	struct dfl_fpga_cdev *cdev = drvdata->cdev;
387
388	if (!num_vfs) {
389		/*
390		 * disable SRIOV and then put released ports back to default
391		 * PF access mode.
392		 */
393		pci_disable_sriov(pcidev);
394
395		dfl_fpga_cdev_config_ports_pf(cdev);
396
397	} else {
398		int ret;
399
400		/*
401		 * before enable SRIOV, put released ports into VF access mode
402		 * first of all.
403		 */
404		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
405		if (ret)
406			return ret;
407
408		ret = pci_enable_sriov(pcidev, num_vfs);
409		if (ret) {
410			dfl_fpga_cdev_config_ports_pf(cdev);
411			return ret;
412		}
413	}
414
415	return num_vfs;
416}
417
/* Unbind callback: undo SR-IOV (PF only), remove feature devices, AER off. */
static void cci_pci_remove(struct pci_dev *pcidev)
{
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
	pci_disable_pcie_error_reporting(pcidev);
}
426
/* PCI driver glue: probe/remove plus the SR-IOV configuration hook. */
static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
/* ===== Linux v6.9.4 revision of drivers/fpga/dfl-pci.c ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for FPGA Device Feature List (DFL) PCIe device
  4 *
  5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6 *
  7 * Authors:
  8 *   Zhang Yi <Yi.Z.Zhang@intel.com>
  9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 10 *   Joseph Grecco <joe.grecco@intel.com>
 11 *   Enno Luebbers <enno.luebbers@intel.com>
 12 *   Tim Whisonant <tim.whisonant@intel.com>
 13 *   Ananda Ravuri <ananda.ravuri@intel.com>
 14 *   Henry Mitchel <henry.mitchel@intel.com>
 15 */
 16
 17#include <linux/pci.h>
 18#include <linux/dma-mapping.h>
 19#include <linux/types.h>
 20#include <linux/kernel.h>
 21#include <linux/module.h>
 22#include <linux/stddef.h>
 23#include <linux/errno.h>
 
 24
 25#include "dfl.h"
 26
/* module identification */
#define DRV_VERSION	"0.8"
#define DRV_NAME	"dfl-pci"

/* VSEC ID used by Intel for the Device Feature List capability */
#define PCI_VSEC_ID_INTEL_DFLS 0x43

/* offsets within the DFL VSEC: entry count and resource-entry array */
#define PCI_VNDR_DFLS_CNT 0x8
#define PCI_VNDR_DFLS_RES 0xc

/* each resource entry dword encodes a BAR number and a byte offset */
#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
 37
/* per-device driver state, attached via pci_set_drvdata() */
struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};
 41
 42static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
 43{
 44	if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
 45		return NULL;
 46
 47	return pcim_iomap_table(pcidev)[0];
 48}
 49
 50static int cci_pci_alloc_irq(struct pci_dev *pcidev)
 51{
 52	int ret, nvec = pci_msix_vec_count(pcidev);
 53
 54	if (nvec <= 0) {
 55		dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
 56		return 0;
 57	}
 58
 59	ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
 60	if (ret < 0)
 61		return ret;
 62
 63	return nvec;
 64}
 65
/* Release the MSI-X vectors obtained by cci_pci_alloc_irq(). */
static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}
 70
/* PCI Device ID (physical functions) */
#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
#define PCIE_DEVICE_ID_SILICOM_PAC_N5010	0x1000
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011	0x1001
#define PCIE_DEVICE_ID_INTEL_DFL		0xbcce
/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
#define PCIE_SUBDEVICE_ID_INTEL_D5005		0x138d
#define PCIE_SUBDEVICE_ID_INTEL_N6000		0x1770
#define PCIE_SUBDEVICE_ID_INTEL_N6001		0x1771
#define PCIE_SUBDEVICE_ID_INTEL_C6100		0x17d4

/* VF Device (SR-IOV virtual-function counterparts of the PFs above) */
#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
#define PCIE_DEVICE_ID_INTEL_DFL_VF		0xbccf
 92
 93static struct pci_device_id cci_pcie_id_tbl[] = {
 94	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
 95	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
 96	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
 97	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
 98	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
 99	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
100	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
101	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
102	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
103	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
104	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
105	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
106			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
107	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
108			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
109	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
110			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
111	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
112			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
113	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
114			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
115	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
116			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
117	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
118			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
119	{0,}
120};
121MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
122
123static int cci_init_drvdata(struct pci_dev *pcidev)
124{
125	struct cci_drvdata *drvdata;
126
127	drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
128	if (!drvdata)
129		return -ENOMEM;
130
131	pci_set_drvdata(pcidev, drvdata);
132
133	return 0;
134}
135
/* Tear down everything set up by cci_enumerate_feature_devs(). */
static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all children feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
	/* only free vectors once no child device can still use them */
	cci_pci_free_irq(pcidev);
}
144
145static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
146{
147	unsigned int i;
148	int *table;
149
150	table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
151	if (!table)
152		return table;
153
154	for (i = 0; i < nvec; i++)
155		table[i] = pci_irq_vector(pcidev, i);
156
157	return table;
158}
159
/*
 * Locate Device Feature Lists described by the Intel DFLS Vendor-Specific
 * Extended Capability and register each one with @info.
 *
 * VSEC layout (offsets relative to the capability):
 *   PCI_VNDR_DFLS_CNT - number of DFL resource entries
 *   PCI_VNDR_DFLS_RES - array of dwords, each holding a BAR number
 *                       (PCI_VNDR_DFLS_RES_BAR_MASK) and a byte offset
 *                       into that BAR (PCI_VNDR_DFLS_RES_OFF_MASK)
 *
 * Return: 0 on success, -ENODEV if the capability is absent, -EINVAL if
 * the VSEC contents are malformed.
 */
static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
	u32 bir, offset, dfl_cnt, dfl_res;
	int dfl_res_off, i, bars, voff;
	resource_size_t start, len;

	voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
					PCI_VSEC_ID_INTEL_DFLS);
	if (!voff) {
		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
		return -ENODEV;
	}

	dfl_cnt = 0;
	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
	/* at most one DFL per BAR */
	if (dfl_cnt > PCI_STD_NUM_BARS) {
		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
			__func__, dfl_cnt, PCI_STD_NUM_BARS);
		return -EINVAL;
	}

	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
			__func__);
		return -EINVAL;
	}

	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
		/* preset to all-ones so a failed config read trips the
		 * validation below */
		dfl_res = GENMASK(31, 0);
		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
		if (bir >= PCI_STD_NUM_BARS) {
			dev_err(&pcidev->dev, "%s bad bir number %d\n",
				__func__, bir);
			return -EINVAL;
		}

		/* reject two DFL entries claiming the same BAR */
		if (bars & BIT(bir)) {
			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
				__func__, bir);
			return -EINVAL;
		}

		bars |= BIT(bir);

		len = pci_resource_len(pcidev, bir);
		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
		if (offset >= len) {
			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
				__func__, offset, &len);
			return -EINVAL;
		}

		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);

		/* the DFL spans from the offset to the end of the BAR */
		len -= offset;

		start = pci_resource_start(pcidev, bir) + offset;

		dfl_fpga_enum_info_add_dfl(info, start, len);
	}

	return 0;
}
226
/*
 * Default method of finding dfls starting at offset 0 of bar 0.
 *
 * Maps BAR 0 and checks whether it begins with an FME or a Port header.
 * For an FME (PF device) the FME capability register gives the port
 * count; each implemented port's BAR/offset is read from the FME
 * port-offset registers and added to @info.  For a Port (VF device),
 * BAR 0 itself is the only DFL.
 *
 * Return: 0 on success, -ENOMEM if BAR 0 cannot be mapped, -ENODEV if
 * BAR 0 holds neither an FME nor a Port, -EINVAL on a bad port BAR.
 */
static int find_dfls_by_default(struct pci_dev *pcidev,
				struct dfl_fpga_enum_info *info)
{
	int port_num, bar, i, ret = 0;
	resource_size_t start, len;
	void __iomem *base;
	u32 offset;
	u64 v;

	/* start to find Device Feature List from Bar 0 */
	base = cci_pci_ioremap_bar0(pcidev);
	if (!base)
		return -ENOMEM;

	/*
	 * PF device has FME and Ports/AFUs, and VF device only has one
	 * Port/AFU. Check them and add related "Device Feature List" info
	 * for the next step enumeration.
	 */
	if (dfl_feature_is_fme(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);

		/*
		 * find more Device Feature Lists (e.g. Ports) per information
		 * indicated by FME module.
		 */
		v = readq(base + FME_HDR_CAP);
		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);

		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);

		for (i = 0; i < port_num; i++) {
			v = readq(base + FME_HDR_PORT_OFST(i));

			/* skip ports which are not implemented. */
			if (!(v & FME_PORT_OFST_IMP))
				continue;

			/*
			 * add Port's Device Feature List information for next
			 * step enumeration.
			 */
			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
			if (bar == FME_PORT_OFST_BAR_SKIP) {
				/* sentinel: port not exposed through a BAR */
				continue;
			} else if (bar >= PCI_STD_NUM_BARS) {
				dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
					bar, i);
				ret = -EINVAL;
				break;
			}

			start = pci_resource_start(pcidev, bar) + offset;
			len = pci_resource_len(pcidev, bar) - offset;

			dfl_fpga_enum_info_add_dfl(info, start, len);
		}
	} else if (dfl_feature_is_port(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);
	} else {
		ret = -ENODEV;
	}

	/* release I/O mappings for next step enumeration */
	pcim_iounmap_regions(pcidev, BIT(0));

	return ret;
}
303
/*
 * Enumerate all DFL-based feature devices below this PCI device.
 *
 * Collects enumeration information (MSI-X vectors plus the location of
 * each Device Feature List, found via the DFL VSEC or by the default
 * BAR 0 scan) and passes it to the DFL core, which creates the child
 * feature devices.  The resulting container device is stored in
 * drvdata->cdev.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);	/* not needed once handed to info */
		if (ret)
			goto irq_free_exit;
	}

	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)	/* no DFL VSEC: fall back to BAR 0 layout */
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	/* any failure past IRQ allocation returns the vectors */
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}
362
363static
364int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
365{
366	int ret;
367
368	ret = pcim_enable_device(pcidev);
369	if (ret < 0) {
370		dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
371		return ret;
372	}
373
 
 
 
 
374	pci_set_master(pcidev);
375
376	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
377	if (ret)
378		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
379	if (ret) {
 
 
 
 
 
 
380		dev_err(&pcidev->dev, "No suitable DMA support available.\n");
381		return ret;
382	}
383
384	ret = cci_init_drvdata(pcidev);
385	if (ret) {
386		dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
387		return ret;
388	}
389
390	ret = cci_enumerate_feature_devs(pcidev);
391	if (ret) {
392		dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
393		return ret;
394	}
395
396	return 0;
 
 
 
 
397}
398
399static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
400{
401	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
402	struct dfl_fpga_cdev *cdev = drvdata->cdev;
403
404	if (!num_vfs) {
405		/*
406		 * disable SRIOV and then put released ports back to default
407		 * PF access mode.
408		 */
409		pci_disable_sriov(pcidev);
410
411		dfl_fpga_cdev_config_ports_pf(cdev);
412
413	} else {
414		int ret;
415
416		/*
417		 * before enable SRIOV, put released ports into VF access mode
418		 * first of all.
419		 */
420		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
421		if (ret)
422			return ret;
423
424		ret = pci_enable_sriov(pcidev, num_vfs);
425		if (ret) {
426			dfl_fpga_cdev_config_ports_pf(cdev);
427			return ret;
428		}
429	}
430
431	return num_vfs;
432}
433
/* Unbind callback: undo SR-IOV (PF only) and remove all feature devices. */
static void cci_pci_remove(struct pci_dev *pcidev)
{
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
}
441
/* PCI driver glue: probe/remove plus the SR-IOV configuration hook. */
static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");