// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) PCIe device
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Zhang Yi <Yi.Z.Zhang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>

#include "dfl.h"

#define DRV_VERSION	"0.8"
#define DRV_NAME	"dfl-pci"

#define PCI_VSEC_ID_INTEL_DFLS 0x43

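/*
 * Layout of the Intel DFL VSEC (PCI_VSEC_ID_INTEL_DFLS) as consumed by
 * find_dfls_by_vsec() below: a 32-bit DFL count lives at PCI_VNDR_DFLS_CNT,
 * followed by one 32-bit resource word per DFL starting at PCI_VNDR_DFLS_RES.
 * In each resource word, bits 2:0 select the BAR and bits 31:3 give the byte
 * offset of the DFL within that BAR.
 */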
#define PCI_VNDR_DFLS_CNT 0x8
#define PCI_VNDR_DFLS_RES 0xc

#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)

struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};

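/*
 * Allocate one MSI-X vector for every vector the device advertises.
 * Returns the number of vectors allocated, 0 if the device has no MSI-X
 * support, or a negative error code on failure.
 */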
static int cci_pci_alloc_irq(struct pci_dev *pcidev)
{
	int ret, nvec = pci_msix_vec_count(pcidev);

	if (nvec <= 0) {
		dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
		return 0;
	}

	ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	return nvec;
}

static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}

/* PCI Device ID */
#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
#define PCIE_DEVICE_ID_SILICOM_PAC_N5010	0x1000
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011	0x1001
#define PCIE_DEVICE_ID_INTEL_DFL		0xbcce
/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
#define PCIE_SUBDEVICE_ID_INTEL_D5005		0x138d
#define PCIE_SUBDEVICE_ID_INTEL_N6000		0x1770
#define PCIE_SUBDEVICE_ID_INTEL_N6001		0x1771
#define PCIE_SUBDEVICE_ID_INTEL_C6100		0x17d4

/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
#define PCIE_DEVICE_ID_INTEL_DFL_VF		0xbccf

static struct pci_device_id cci_pcie_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);

static int cci_init_drvdata(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata;

	drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	pci_set_drvdata(pcidev, drvdata);

	return 0;
}

static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all child feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
	cci_pci_free_irq(pcidev);
}

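/*
 * Build a table of Linux IRQ numbers, one per allocated MSI-X vector, for
 * the DFL enumeration core. The caller frees the table with kfree().
 */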
static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
{
	unsigned int i;
	int *table;

	table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
	if (!table)
		return table;

	for (i = 0; i < nvec; i++)
		table[i] = pci_irq_vector(pcidev, i);

	return table;
}

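/*
 * Locate Device Feature Lists via the Intel vendor-specific capability.
 * Each resource entry is validated before use: the DFL count must not exceed
 * the number of standard BARs, all entries must fit within extended config
 * space, each BAR may carry at most one DFL, and the start offset must lie
 * within that BAR.
 */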
static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
	u32 bir, offset, dfl_cnt, dfl_res;
	int dfl_res_off, i, bars, voff;
	resource_size_t start, len;

	voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
					PCI_VSEC_ID_INTEL_DFLS);
	if (!voff) {
		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
		return -ENODEV;
	}

	dfl_cnt = 0;
	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
	if (dfl_cnt > PCI_STD_NUM_BARS) {
		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
			__func__, dfl_cnt, PCI_STD_NUM_BARS);
		return -EINVAL;
	}

	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
			__func__);
		return -EINVAL;
	}

	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
		dfl_res = GENMASK(31, 0);
		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
		if (bir >= PCI_STD_NUM_BARS) {
			dev_err(&pcidev->dev, "%s bad bir number %d\n",
				__func__, bir);
			return -EINVAL;
		}

		if (bars & BIT(bir)) {
			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
				__func__, bir);
			return -EINVAL;
		}

		bars |= BIT(bir);

		len = pci_resource_len(pcidev, bir);
		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
		if (offset >= len) {
			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
				__func__, offset, &len);
			return -EINVAL;
		}

		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);

		len -= offset;

		start = pci_resource_start(pcidev, bir) + offset;

		dfl_fpga_enum_info_add_dfl(info, start, len);
	}

	return 0;
}

/* default method of finding DFLs, starting at offset 0 of BAR 0 */
static int find_dfls_by_default(struct pci_dev *pcidev,
				struct dfl_fpga_enum_info *info)
{
	int port_num, bar, i, ret = 0;
	resource_size_t start, len;
	void __iomem *base;
	u32 offset;
	u64 v;

	/* start looking for the Device Feature List in BAR 0 */
	base = pcim_iomap_region(pcidev, 0, DRV_NAME);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * A PF device has an FME and Ports/AFUs, while a VF device has only
	 * one Port/AFU. Check which is present and add the related Device
	 * Feature List info for the next enumeration step.
	 */
	if (dfl_feature_is_fme(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);

		/*
		 * find more Device Feature Lists (e.g. Ports) based on the
		 * information reported by the FME module.
		 */
		v = readq(base + FME_HDR_CAP);
		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);

		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);

		for (i = 0; i < port_num; i++) {
			v = readq(base + FME_HDR_PORT_OFST(i));

			/* skip ports that are not implemented */
			if (!(v & FME_PORT_OFST_IMP))
				continue;

			/*
			 * add the Port's Device Feature List information for
			 * the next enumeration step.
			 */
			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
			if (bar == FME_PORT_OFST_BAR_SKIP) {
				continue;
			} else if (bar >= PCI_STD_NUM_BARS) {
				dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
					bar, i);
				ret = -EINVAL;
				break;
			}

			start = pci_resource_start(pcidev, bar) + offset;
			len = pci_resource_len(pcidev, bar) - offset;

			dfl_fpga_enum_info_add_dfl(info, start, len);
		}
	} else if (dfl_feature_is_port(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);
	} else {
		ret = -ENODEV;
	}

	/* release I/O mappings for the next enumeration step */
	pcim_iounmap_region(pcidev, 0);

	return ret;
}

/* enumerate feature devices under the pci device */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device supports irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);
		if (ret)
			goto irq_free_exit;
	}

	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with the prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}

static
int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
{
	int ret;

	ret = pcim_enable_device(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
		return ret;
	}

	pci_set_master(pcidev);

	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pcidev->dev, "No suitable DMA support available.\n");
		return ret;
	}

	ret = cci_init_drvdata(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
		return ret;
	}

	ret = cci_enumerate_feature_devs(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
		return ret;
	}

	return 0;
}

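/*
 * PCI core sriov_configure callback: num_vfs == 0 disables SR-IOV and
 * returns the released ports to PF access mode; a non-zero num_vfs puts
 * the released ports into VF access mode before enabling SR-IOV. Returns
 * num_vfs on success or a negative error code.
 */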
static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_cdev *cdev = drvdata->cdev;

	if (!num_vfs) {
		/*
		 * disable SR-IOV and then put the released ports back to the
		 * default PF access mode.
		 */
		pci_disable_sriov(pcidev);

		dfl_fpga_cdev_config_ports_pf(cdev);

	} else {
		int ret;

		/*
		 * before enabling SR-IOV, put the released ports into VF
		 * access mode.
		 */
		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
		if (ret)
			return ret;

		ret = pci_enable_sriov(pcidev, num_vfs);
		if (ret) {
			dfl_fpga_cdev_config_ports_pf(cdev);
			return ret;
		}
	}

	return num_vfs;
}

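/*
 * On removal, disable any active VFs first (PF only), then tear down the
 * child feature devices and free the IRQ vectors.
 */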
static void cci_pci_remove(struct pci_dev *pcidev)
{
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
}

static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");