1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Device Feature List (DFL) Support
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Zhang Yi <yi.z.zhang@intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
12 */
13#include <linux/fpga-dfl.h>
14#include <linux/module.h>
15#include <linux/uaccess.h>
16
17#include "dfl.h"
18
19static DEFINE_MUTEX(dfl_id_mutex);
20
21/*
22 * when adding a new feature dev support in DFL framework, it's required to
23 * add a new item in enum dfl_id_type and provide related information in below
24 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
25 * platform device creation (define name strings in dfl.h, as they could be
26 * reused by platform device drivers).
27 *
28 * if the new feature dev needs chardev support, then it's required to add
29 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
30 * index to dfl_chardevs table. If no chardev support just set devt_type
31 * as one invalid index (DFL_FPGA_DEVT_MAX).
32 */
/* identifies the kind of DFL feature device: FME or Port */
enum dfl_id_type {
	FME_ID,		/* fme id allocation and mapping */
	PORT_ID,	/* port id allocation and mapping */
	DFL_ID_MAX,	/* number of valid types; also used as "invalid" marker */
};

/* index into dfl_chrdevs[]; DFL_FPGA_DEVT_MAX means "no chardev support" */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

/* per-type lockdep class keys so FME/Port pdata locks get distinct classes */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

/* lockdep class names, indexed by dfl_id_type like dfl_pdata_keys above */
static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
51
52/**
53 * dfl_dev_info - dfl feature device information.
54 * @name: name string of the feature platform device.
55 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
56 * @id: idr id of the feature dev.
57 * @devt_type: index to dfl_chrdevs[].
58 */
59struct dfl_dev_info {
60 const char *name;
61 u32 dfh_id;
62 struct idr id;
63 enum dfl_fpga_devt_type devt_type;
64};
65
66/* it is indexed by dfl_id_type */
67static struct dfl_dev_info dfl_devs[] = {
68 {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
69 .devt_type = DFL_FPGA_DEVT_FME},
70 {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
71 .devt_type = DFL_FPGA_DEVT_PORT},
72};
73
74/**
75 * dfl_chardev_info - chardev information of dfl feature device
76 * @name: nmae string of the char device.
77 * @devt: devt of the char device.
78 */
79struct dfl_chardev_info {
80 const char *name;
81 dev_t devt;
82};
83
84/* indexed by enum dfl_fpga_devt_type */
85static struct dfl_chardev_info dfl_chrdevs[] = {
86 {.name = DFL_FPGA_FEATURE_DEV_FME},
87 {.name = DFL_FPGA_FEATURE_DEV_PORT},
88};
89
90static void dfl_ids_init(void)
91{
92 int i;
93
94 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
95 idr_init(&dfl_devs[i].id);
96}
97
98static void dfl_ids_destroy(void)
99{
100 int i;
101
102 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
103 idr_destroy(&dfl_devs[i].id);
104}
105
106static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
107{
108 int id;
109
110 WARN_ON(type >= DFL_ID_MAX);
111 mutex_lock(&dfl_id_mutex);
112 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
113 mutex_unlock(&dfl_id_mutex);
114
115 return id;
116}
117
/* release a device id previously allocated by dfl_id_alloc() */
static void dfl_id_free(enum dfl_id_type type, int id)
{
	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	idr_remove(&dfl_devs[type].id, id);
	mutex_unlock(&dfl_id_mutex);
}
125
126static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
127{
128 int i;
129
130 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
131 if (!strcmp(dfl_devs[i].name, pdev->name))
132 return i;
133
134 return DFL_ID_MAX;
135}
136
137static enum dfl_id_type dfh_id_to_type(u32 id)
138{
139 int i;
140
141 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
142 if (dfl_devs[i].dfh_id == id)
143 return i;
144
145 return DFL_ID_MAX;
146}
147
148/*
149 * introduce a global port_ops list, it allows port drivers to register ops
150 * in such list, then other feature devices (e.g. FME), could use the port
151 * functions even related port platform device is hidden. Below is one example,
152 * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is
 * enabled, port (and its AFU) is turned into VF and port platform device
154 * is hidden from system but it's still required to access port to finish FPGA
155 * reconfiguration function in FME.
156 */
157
158static DEFINE_MUTEX(dfl_port_ops_mutex);
159static LIST_HEAD(dfl_port_ops_list);
160
161/**
162 * dfl_fpga_port_ops_get - get matched port ops from the global list
163 * @pdev: platform device to match with associated port ops.
164 * Return: matched port ops on success, NULL otherwise.
165 *
166 * Please note that must dfl_fpga_port_ops_put after use the port_ops.
167 */
168struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
169{
170 struct dfl_fpga_port_ops *ops = NULL;
171
172 mutex_lock(&dfl_port_ops_mutex);
173 if (list_empty(&dfl_port_ops_list))
174 goto done;
175
176 list_for_each_entry(ops, &dfl_port_ops_list, node) {
177 /* match port_ops using the name of platform device */
178 if (!strcmp(pdev->name, ops->name)) {
179 if (!try_module_get(ops->owner))
180 ops = NULL;
181 goto done;
182 }
183 }
184
185 ops = NULL;
186done:
187 mutex_unlock(&dfl_port_ops_mutex);
188 return ops;
189}
190EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
191
192/**
193 * dfl_fpga_port_ops_put - put port ops
194 * @ops: port ops.
195 */
196void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
197{
198 if (ops && ops->owner)
199 module_put(ops->owner);
200}
201EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
202
/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 *
 * @ops is linked into the list, not copied; the caller keeps ownership
 * and must call dfl_fpga_port_ops_del() before freeing it.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
214
/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
226
227/**
228 * dfl_fpga_check_port_id - check the port id
229 * @pdev: port platform device.
230 * @pport_id: port id to compare.
231 *
232 * Return: 1 if port device matches with given port id, otherwise 0.
233 */
234int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
235{
236 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
237 struct dfl_fpga_port_ops *port_ops;
238
239 if (pdata->id != FEATURE_DEV_ID_UNUSED)
240 return pdata->id == *(int *)pport_id;
241
242 port_ops = dfl_fpga_port_ops_get(pdev);
243 if (!port_ops || !port_ops->get_id)
244 return 0;
245
246 pdata->id = port_ops->get_id(pdev);
247 dfl_fpga_port_ops_put(port_ops);
248
249 return pdata->id == *(int *)pport_id;
250}
251EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
252
253/**
254 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
255 * @pdev: feature device.
256 */
257void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
258{
259 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
260 struct dfl_feature *feature;
261
262 dfl_fpga_dev_for_each_feature(pdata, feature)
263 if (feature->ops) {
264 if (feature->ops->uinit)
265 feature->ops->uinit(pdev, feature);
266 feature->ops = NULL;
267 }
268}
269EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
270
271static int dfl_feature_instance_init(struct platform_device *pdev,
272 struct dfl_feature_platform_data *pdata,
273 struct dfl_feature *feature,
274 struct dfl_feature_driver *drv)
275{
276 int ret = 0;
277
278 if (drv->ops->init) {
279 ret = drv->ops->init(pdev, feature);
280 if (ret)
281 return ret;
282 }
283
284 feature->ops = drv->ops;
285
286 return ret;
287}
288
289static bool dfl_feature_drv_match(struct dfl_feature *feature,
290 struct dfl_feature_driver *driver)
291{
292 const struct dfl_feature_id *ids = driver->id_table;
293
294 if (ids) {
295 while (ids->id) {
296 if (ids->id == feature->id)
297 return true;
298 ids++;
299 }
300 }
301 return false;
302}
303
304/**
305 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
306 * @pdev: feature device.
307 * @feature_drvs: drvs for sub features.
308 *
309 * This function will match sub features with given feature drvs list and
310 * use matched drv to init related sub feature.
311 *
312 * Return: 0 on success, negative error code otherwise.
313 */
314int dfl_fpga_dev_feature_init(struct platform_device *pdev,
315 struct dfl_feature_driver *feature_drvs)
316{
317 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
318 struct dfl_feature_driver *drv = feature_drvs;
319 struct dfl_feature *feature;
320 int ret;
321
322 while (drv->ops) {
323 dfl_fpga_dev_for_each_feature(pdata, feature) {
324 if (dfl_feature_drv_match(feature, drv)) {
325 ret = dfl_feature_instance_init(pdev, pdata,
326 feature, drv);
327 if (ret)
328 goto exit;
329 }
330 }
331 drv++;
332 }
333
334 return 0;
335exit:
336 dfl_fpga_dev_feature_uinit(pdev);
337 return ret;
338}
339EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
340
341static void dfl_chardev_uinit(void)
342{
343 int i;
344
345 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
346 if (MAJOR(dfl_chrdevs[i].devt)) {
347 unregister_chrdev_region(dfl_chrdevs[i].devt,
348 MINORMASK + 1);
349 dfl_chrdevs[i].devt = MKDEV(0, 0);
350 }
351}
352
353static int dfl_chardev_init(void)
354{
355 int i, ret;
356
357 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
358 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
359 MINORMASK + 1, dfl_chrdevs[i].name);
360 if (ret)
361 goto exit;
362 }
363
364 return 0;
365
366exit:
367 dfl_chardev_uinit();
368 return ret;
369}
370
371static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
372{
373 if (type >= DFL_FPGA_DEVT_MAX)
374 return 0;
375
376 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
377}
378
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	/* the cdev is live and may receive opens as soon as cdev_add() returns */
	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
408
/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev.
 *
 * Counterpart of dfl_fpga_dev_ops_register(); removes the cdev added there.
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
420
/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @nr_irqs: number of irqs for all feature devices.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of feature device in enumeration.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	unsigned int nr_irqs;
	int *irq_table;		/* copied from dfl_fpga_enum_info during enumerate */

	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	struct list_head sub_features;	/* list of struct dfl_feature_info */
	int feature_num;
};

/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 * @node: node in sub_features linked list.
 * @irq_base: start of irq index in this sub feature.
 * @nr_irqs: number of irqs of this sub feature.
 */
struct dfl_feature_info {
	u64 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
	unsigned int irq_base;
	unsigned int nr_irqs;
};
464
/*
 * Track a registered port device on the container device and pin a
 * reference to it; the reference is dropped in
 * dfl_fpga_feature_devs_remove().
 */
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
475
/*
 * register current feature device, it is called when we need to switch to
 * another feature parsing or we have parsed all features on given device
 * feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	enum dfl_id_type type;
	int ret, index = 0;

	/* nothing to commit if no feature device is being built */
	if (!fdev)
		return 0;

	type = feature_dev_id_type(fdev);
	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return -EINVAL;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	pdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&pdata->lock);
	/* distinct lockdep class per feature dev type (see dfl_pdata_keys) */
	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/*
	 * the count should be initialized to 0 to make sure
	 *__fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index];
		struct dfl_feature_irq_ctx *ctx;
		unsigned int i;

		/* save resource information for each feature */
		feature->dev = fdev;
		feature->id = finfo->fid;
		feature->resource_index = index;
		feature->ioaddr = finfo->ioaddr;
		fdev->resource[index++] = finfo->mmio_res;

		if (finfo->nr_irqs) {
			/* devm on binfo->dev: ctx outlives this enumeration */
			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
					   sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return -ENOMEM;

			/* translate local irq indexes into Linux IRQ numbers */
			for (i = 0; i < finfo->nr_irqs; i++)
				ctx[i].irq =
					binfo->irq_table[finfo->irq_base + i];

			feature->irq_ctx = ctx;
			feature->nr_irqs = finfo->nr_irqs;
		}

		/* finfo is consumed here; remaining entries are freed by
		 * build_info_free() on the error paths above */
		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (type == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
				get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing their resource.
		 *
		 * The resource of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
582
/*
 * Start building a new feature device of @type whose header registers are
 * mapped at @ioaddr. Any device currently being built is committed first.
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type, void __iomem *ioaddr)
{
	struct platform_device *fdev;
	int ret;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/* we will create a new device, commit current device first */
	ret = build_info_commit_dev(binfo);
	if (ret)
		return ret;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	/* reset per-device build state before allocating the real id, so
	 * build_info_free() can tell how far this build got */
	binfo->feature_dev = fdev;
	binfo->feature_num = 0;
	binfo->ioaddr = ioaddr;
	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	/* devt is 0 when this feature dev type has no chardev support */
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
620
/*
 * Release a build info and any half-built feature device it still holds.
 * Safe to call both after success (feature_dev is NULL then) and after a
 * failure at any point of the build.
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		/* sub features are only collected after a valid id exists */
		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* no-op when feature_dev is NULL (already committed) */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
643
644static inline u32 feature_size(void __iomem *start)
645{
646 u64 v = readq(start + DFH);
647 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
648 /* workaround for private features with invalid size, use 4K instead */
649 return ofst ? ofst : 4096;
650}
651
652static u64 feature_id(void __iomem *start)
653{
654 u64 v = readq(start + DFH);
655 u16 id = FIELD_GET(DFH_ID, v);
656 u8 type = FIELD_GET(DFH_TYPE, v);
657
658 if (type == DFH_TYPE_FIU)
659 return FEATURE_ID_FIU_HEADER;
660 else if (type == DFH_TYPE_PRIVATE)
661 return id;
662 else if (type == DFH_TYPE_AFU)
663 return FEATURE_ID_AFU;
664
665 WARN_ON(1);
666 return 0;
667}
668
/*
 * Read the irq vector base/count that a feature advertises in its
 * capability register and validate them against the device irq table.
 * Features not in the quirk list below report zero irqs.
 */
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
			      resource_size_t ofst, u64 fid,
			      unsigned int *irq_base, unsigned int *nr_irqs)
{
	void __iomem *base = binfo->ioaddr + ofst;
	unsigned int i, ibase, inr = 0;
	int virq;
	u64 v;

	/*
	 * Ideally DFL framework should only read info from DFL header, but
	 * current version DFL only provides mmio resources information for
	 * each feature in DFL Header, no field for interrupt resources.
	 * Interrupt resource information is provided by specific mmio
	 * registers of each private feature which supports interrupt. So in
	 * order to parse and assign irq resources, DFL framework has to look
	 * into specific capability registers of these private features.
	 *
	 * Once future DFL version supports generic interrupt resource
	 * information in common DFL headers, the generic interrupt parsing
	 * code will be added. But in order to be compatible to old version
	 * DFL, the driver may still fall back to these quirks.
	 */
	switch (fid) {
	case PORT_FEATURE_ID_UINT:
		v = readq(base + PORT_UINT_CAP);
		ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
		inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
		break;
	case PORT_FEATURE_ID_ERROR:
		v = readq(base + PORT_ERROR_CAP);
		ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
		inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
		break;
	case FME_FEATURE_ID_GLOBAL_ERR:
		v = readq(base + FME_ERROR_CAP);
		ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
		inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
		break;
	}

	/* ibase is only meaningful when inr != 0, i.e. a case above matched */
	if (!inr) {
		*irq_base = 0;
		*nr_irqs = 0;
		return 0;
	}

	dev_dbg(binfo->dev, "feature: 0x%llx, irq_base: %u, nr_irqs: %u\n",
		fid, ibase, inr);

	/* the advertised vector range must fit inside the device irq table */
	if (ibase + inr > binfo->nr_irqs) {
		dev_err(binfo->dev,
			"Invalid interrupt number in feature 0x%llx\n", fid);
		return -EINVAL;
	}

	for (i = 0; i < inr; i++) {
		virq = binfo->irq_table[ibase + i];
		/*
		 * NOTE(review): the upper bound looks off-by-one — valid Linux
		 * irq numbers are < NR_IRQS, so this should probably be
		 * ">= NR_IRQS". Confirm before changing behavior.
		 */
		if (virq < 0 || virq > NR_IRQS) {
			dev_err(binfo->dev,
				"Invalid irq table entry for feature 0x%llx\n",
				fid);
			return -EINVAL;
		}
	}

	*irq_base = ibase;
	*nr_irqs = inr;

	return 0;
}
740
741/*
742 * when create sub feature instances, for private features, it doesn't need
743 * to provide resource size and feature id as they could be read from DFH
744 * register. For afu sub feature, its register region only contains user
745 * defined registers, so never trust any information from it, just use the
746 * resource size information provided by its parent FIU.
747 */
748static int
749create_feature_instance(struct build_feature_devs_info *binfo,
750 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
751 resource_size_t size, u64 fid)
752{
753 unsigned int irq_base, nr_irqs;
754 struct dfl_feature_info *finfo;
755 int ret;
756
757 /* read feature size and id if inputs are invalid */
758 size = size ? size : feature_size(dfl->ioaddr + ofst);
759 fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
760
761 if (dfl->len - ofst < size)
762 return -EINVAL;
763
764 ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
765 if (ret)
766 return ret;
767
768 finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
769 if (!finfo)
770 return -ENOMEM;
771
772 finfo->fid = fid;
773 finfo->mmio_res.start = dfl->start + ofst;
774 finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
775 finfo->mmio_res.flags = IORESOURCE_MEM;
776 finfo->irq_base = irq_base;
777 finfo->nr_irqs = nr_irqs;
778 finfo->ioaddr = dfl->ioaddr + ofst;
779
780 list_add_tail(&finfo->node, &binfo->sub_features);
781 binfo->feature_num++;
782
783 return 0;
784}
785
/*
 * An AFU under a Port: the AFU mmio size comes from the Port's capability
 * register, never from the AFU region itself (see create_feature_instance).
 */
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
				  struct dfl_fpga_enum_dfl *dfl,
				  resource_size_t ofst)
{
	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
	/* the << 10 converts the capability field into bytes (1KB units) */
	u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;

	WARN_ON(!size);

	return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
}
797
798static int parse_feature_afu(struct build_feature_devs_info *binfo,
799 struct dfl_fpga_enum_dfl *dfl,
800 resource_size_t ofst)
801{
802 if (!binfo->feature_dev) {
803 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
804 return -EINVAL;
805 }
806
807 switch (feature_dev_id_type(binfo->feature_dev)) {
808 case PORT_ID:
809 return parse_feature_port_afu(binfo, dfl, ofst);
810 default:
811 dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
812 binfo->feature_dev->name);
813 }
814
815 return 0;
816}
817
/*
 * A FIU header starts a new feature device: commit the previous one,
 * create the new platform device, record the FIU header itself as the
 * first sub feature, then chase the FIU's NEXT_AFU pointer.
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	u32 id, offset;
	u64 v;
	int ret = 0;

	v = readq(dfl->ioaddr + ofst + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
				    dfl->ioaddr + ofst);
	if (ret)
		return ret;

	/* size/fid of 0 let create_feature_instance() read them from DFH */
	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(dfl->ioaddr + ofst + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, dfl, ofst + offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
852
853static int parse_feature_private(struct build_feature_devs_info *binfo,
854 struct dfl_fpga_enum_dfl *dfl,
855 resource_size_t ofst)
856{
857 if (!binfo->feature_dev) {
858 dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
859 (unsigned long long)feature_id(dfl->ioaddr + ofst));
860 return -EINVAL;
861 }
862
863 return create_feature_instance(binfo, dfl, ofst, 0, 0);
864}
865
866/**
867 * parse_feature - parse a feature on given device feature list
868 *
869 * @binfo: build feature devices information.
870 * @dfl: device feature list to parse
871 * @ofst: offset to feature header on this device feature list
872 */
873static int parse_feature(struct build_feature_devs_info *binfo,
874 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
875{
876 u64 v;
877 u32 type;
878
879 v = readq(dfl->ioaddr + ofst + DFH);
880 type = FIELD_GET(DFH_TYPE, v);
881
882 switch (type) {
883 case DFH_TYPE_AFU:
884 return parse_feature_afu(binfo, dfl, ofst);
885 case DFH_TYPE_PRIVATE:
886 return parse_feature_private(binfo, dfl, ofst);
887 case DFH_TYPE_FIU:
888 return parse_feature_fiu(binfo, dfl, ofst);
889 default:
890 dev_info(binfo->dev,
891 "Feature Type %x is not supported.\n", type);
892 }
893
894 return 0;
895}
896
/*
 * Walk one Device Feature List and parse each feature on it; commit the
 * feature device still being built once the end of the list is reached.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      struct dfl_fpga_enum_dfl *dfl)
{
	void __iomem *start = dfl->ioaddr;
	void __iomem *end = dfl->ioaddr + dfl->len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		/* offsets passed down are relative to the DFL base */
		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
		if (ret)
			return ret;

		v = readq(start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	return build_info_commit_dev(binfo);
}
928
929struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
930{
931 struct dfl_fpga_enum_info *info;
932
933 get_device(dev);
934
935 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
936 if (!info) {
937 put_device(dev);
938 return NULL;
939 }
940
941 info->dev = dev;
942 INIT_LIST_HEAD(&info->dfls);
943
944 return info;
945}
946EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
947
/**
 * dfl_fpga_enum_info_free - free an enumeration info and its resources
 * @info: ptr to dfl_fpga_enum_info, may be NULL.
 *
 * Also drops the device reference taken in dfl_fpga_enum_info_alloc().
 */
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	/* remove irq table */
	if (info->irq_table)
		devm_kfree(dev, info->irq_table);

	devm_kfree(dev, info);
	/* pairs with get_device() in dfl_fpga_enum_info_alloc() */
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
972
/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 * @ioaddr: mapped mmio resource address of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len,
			       void __iomem *ioaddr)
{
	struct dfl_fpga_enum_dfl *dfl;

	/* devm on info->dev: freed via dfl_fpga_enum_info_free() or devres */
	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;
	dfl->ioaddr = ioaddr;

	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
1006
/**
 * dfl_fpga_enum_info_add_irq - add irq table to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 *
 * One FPGA device may have several interrupts. This function adds irq
 * information of the DFL fpga device to enum info for next step enumeration.
 * This function should be called before dfl_fpga_feature_devs_enumerate().
 * As we only support one irq domain for all DFLs in the same enum info, adding
 * irq table a second time for the same enum info will return error.
 *
 * If we need to enumerate DFLs which belong to different irq domains, we
 * should fill more enum info and enumerate them one by one.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
			       unsigned int nr_irqs, int *irq_table)
{
	if (!nr_irqs || !irq_table)
		return -EINVAL;

	/* only one irq table per enum info is supported */
	if (info->irq_table)
		return -EEXIST;

	/* keep a private copy; the caller's table may go away */
	info->irq_table = devm_kmemdup(info->dev, irq_table,
				       sizeof(int) * nr_irqs, GFP_KERNEL);
	if (!info->irq_table)
		return -ENOMEM;

	info->nr_irqs = nr_irqs;

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
1045
/* device_for_each_child() callback: unregister one feature device */
static int remove_feature_dev(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	enum dfl_id_type type = feature_dev_id_type(pdev);
	int id = pdev->id;

	platform_device_unregister(pdev);

	/* type/id were sampled above because pdev may be gone at this point */
	dfl_id_free(type, id);

	return 0;
}
1058
/* unregister every feature device parented under the container's region */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
1063
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	/* the base region is the parent device of all feature devices */
	cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_cdev_exit;

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl);
		if (ret) {
			/* tear down devices created from earlier DFLs too */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
1141
1142/**
1143 * dfl_fpga_feature_devs_remove - remove all feature devices
1144 * @cdev: fpga container device.
1145 *
1146 * Remove the container device and all feature devices under given container
1147 * devices.
1148 */
1149void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
1150{
1151 struct dfl_feature_platform_data *pdata, *ptmp;
1152
1153 mutex_lock(&cdev->lock);
1154 if (cdev->fme_dev)
1155 put_device(cdev->fme_dev);
1156
1157 list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
1158 struct platform_device *port_dev = pdata->dev;
1159
1160 /* remove released ports */
1161 if (!device_is_registered(&port_dev->dev)) {
1162 dfl_id_free(feature_dev_id_type(port_dev),
1163 port_dev->id);
1164 platform_device_put(port_dev);
1165 }
1166
1167 list_del(&pdata->node);
1168 put_device(&port_dev->dev);
1169 }
1170 mutex_unlock(&cdev->lock);
1171
1172 remove_feature_devs(cdev);
1173
1174 fpga_region_unregister(cdev->region);
1175 devm_kfree(cdev->parent, cdev);
1176}
1177EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
1178
1179/**
1180 * __dfl_fpga_cdev_find_port - find a port under given container device
1181 *
1182 * @cdev: container device
1183 * @data: data passed to match function
1184 * @match: match function used to find specific port from the port device list
1185 *
1186 * Find a port device under container device. This function needs to be
1187 * invoked with lock held.
1188 *
1189 * Return: pointer to port's platform device if successful, NULL otherwise.
1190 *
1191 * NOTE: you will need to drop the device reference with put_device() after use.
1192 */
1193struct platform_device *
1194__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1195 int (*match)(struct platform_device *, void *))
1196{
1197 struct dfl_feature_platform_data *pdata;
1198 struct platform_device *port_dev;
1199
1200 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1201 port_dev = pdata->dev;
1202
1203 if (match(port_dev, data) && get_device(&port_dev->dev))
1204 return port_dev;
1205 }
1206
1207 return NULL;
1208}
1209EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1210
1211static int __init dfl_fpga_init(void)
1212{
1213 int ret;
1214
1215 dfl_ids_init();
1216
1217 ret = dfl_chardev_init();
1218 if (ret)
1219 dfl_ids_destroy();
1220
1221 return ret;
1222}
1223
1224/**
1225 * dfl_fpga_cdev_release_port - release a port platform device
1226 *
1227 * @cdev: parent container device.
1228 * @port_id: id of the port platform device.
1229 *
1230 * This function allows user to release a port platform device. This is a
1231 * mandatory step before turn a port from PF into VF for SRIOV support.
1232 *
1233 * Return: 0 on success, negative error code otherwise.
1234 */
1235int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
1236{
1237 struct dfl_feature_platform_data *pdata;
1238 struct platform_device *port_pdev;
1239 int ret = -ENODEV;
1240
1241 mutex_lock(&cdev->lock);
1242 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1243 dfl_fpga_check_port_id);
1244 if (!port_pdev)
1245 goto unlock_exit;
1246
1247 if (!device_is_registered(&port_pdev->dev)) {
1248 ret = -EBUSY;
1249 goto put_dev_exit;
1250 }
1251
1252 pdata = dev_get_platdata(&port_pdev->dev);
1253
1254 mutex_lock(&pdata->lock);
1255 ret = dfl_feature_dev_use_begin(pdata, true);
1256 mutex_unlock(&pdata->lock);
1257 if (ret)
1258 goto put_dev_exit;
1259
1260 platform_device_del(port_pdev);
1261 cdev->released_port_num++;
1262put_dev_exit:
1263 put_device(&port_pdev->dev);
1264unlock_exit:
1265 mutex_unlock(&cdev->lock);
1266 return ret;
1267}
1268EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1269
1270/**
1271 * dfl_fpga_cdev_assign_port - assign a port platform device back
1272 *
1273 * @cdev: parent container device.
1274 * @port_id: id of the port platform device.
1275 *
1276 * This function allows user to assign a port platform device back. This is
1277 * a mandatory step after disable SRIOV support.
1278 *
1279 * Return: 0 on success, negative error code otherwise.
1280 */
1281int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
1282{
1283 struct dfl_feature_platform_data *pdata;
1284 struct platform_device *port_pdev;
1285 int ret = -ENODEV;
1286
1287 mutex_lock(&cdev->lock);
1288 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1289 dfl_fpga_check_port_id);
1290 if (!port_pdev)
1291 goto unlock_exit;
1292
1293 if (device_is_registered(&port_pdev->dev)) {
1294 ret = -EBUSY;
1295 goto put_dev_exit;
1296 }
1297
1298 ret = platform_device_add(port_pdev);
1299 if (ret)
1300 goto put_dev_exit;
1301
1302 pdata = dev_get_platdata(&port_pdev->dev);
1303
1304 mutex_lock(&pdata->lock);
1305 dfl_feature_dev_use_end(pdata);
1306 mutex_unlock(&pdata->lock);
1307
1308 cdev->released_port_num--;
1309put_dev_exit:
1310 put_device(&port_pdev->dev);
1311unlock_exit:
1312 mutex_unlock(&cdev->lock);
1313 return ret;
1314}
1315EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1316
1317static void config_port_access_mode(struct device *fme_dev, int port_id,
1318 bool is_vf)
1319{
1320 void __iomem *base;
1321 u64 v;
1322
1323 base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
1324
1325 v = readq(base + FME_HDR_PORT_OFST(port_id));
1326
1327 v &= ~FME_PORT_OFST_ACC_CTRL;
1328 v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
1329 is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
1330
1331 writeq(v, base + FME_HDR_PORT_OFST(port_id));
1332}
1333
1334#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
1335#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1336
1337/**
1338 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1339 *
1340 * @cdev: parent container device.
1341 *
1342 * This function is needed in sriov configuration routine. It could be used to
1343 * configure the all released ports from VF access mode to PF.
1344 */
1345void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1346{
1347 struct dfl_feature_platform_data *pdata;
1348
1349 mutex_lock(&cdev->lock);
1350 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1351 if (device_is_registered(&pdata->dev->dev))
1352 continue;
1353
1354 config_port_pf_mode(cdev->fme_dev, pdata->id);
1355 }
1356 mutex_unlock(&cdev->lock);
1357}
1358EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1359
1360/**
1361 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1362 *
1363 * @cdev: parent container device.
1364 * @num_vfs: VF device number.
1365 *
1366 * This function is needed in sriov configuration routine. It could be used to
1367 * configure the released ports from PF access mode to VF.
1368 *
1369 * Return: 0 on success, negative error code otherwise.
1370 */
1371int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1372{
1373 struct dfl_feature_platform_data *pdata;
1374 int ret = 0;
1375
1376 mutex_lock(&cdev->lock);
1377 /*
1378 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
1379 * device, so if released port number doesn't match VF device number,
1380 * then reject the request with -EINVAL error code.
1381 */
1382 if (cdev->released_port_num != num_vfs) {
1383 ret = -EINVAL;
1384 goto done;
1385 }
1386
1387 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1388 if (device_is_registered(&pdata->dev->dev))
1389 continue;
1390
1391 config_port_vf_mode(cdev->fme_dev, pdata->id);
1392 }
1393done:
1394 mutex_unlock(&cdev->lock);
1395 return ret;
1396}
1397EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
1398
1399static irqreturn_t dfl_irq_handler(int irq, void *arg)
1400{
1401 struct eventfd_ctx *trigger = arg;
1402
1403 eventfd_signal(trigger, 1);
1404 return IRQ_HANDLED;
1405}
1406
/*
 * Bind or unbind the eventfd given by @fd to irq slot @idx of @feature.
 * Any existing binding for the slot is torn down first; fd < 0 requests
 * an unbind only.
 */
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	/* tear down the previous eventfd binding, if any */
	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	/* a negative fd only requests the unbind done above */
	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	/* request_irq() failed: release the eventfd, then the name */
	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}
1451
1452/**
1453 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
1454 *
1455 * @feature: dfl sub feature.
1456 * @start: start of irq index in this dfl sub feature.
1457 * @count: number of irqs.
1458 * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative.
1459 * unbind "count" specified number of irqs if fds ptr is NULL.
1460 *
1461 * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if
1462 * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is
1463 * NULL.
1464 *
1465 * Return: 0 on success, negative error code otherwise.
1466 */
1467int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
1468 unsigned int count, int32_t *fds)
1469{
1470 unsigned int i;
1471 int ret = 0;
1472
1473 /* overflow */
1474 if (unlikely(start + count < start))
1475 return -EINVAL;
1476
1477 /* exceeds nr_irqs */
1478 if (start + count > feature->nr_irqs)
1479 return -EINVAL;
1480
1481 for (i = 0; i < count; i++) {
1482 int fd = fds ? fds[i] : -1;
1483
1484 ret = do_set_irq_trigger(feature, start + i, fd);
1485 if (ret) {
1486 while (i--)
1487 do_set_irq_trigger(feature, start + i, -1);
1488 break;
1489 }
1490 }
1491
1492 return ret;
1493}
1494EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
1495
1496/**
1497 * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
1498 * @pdev: the feature device which has the sub feature
1499 * @feature: the dfl sub feature
1500 * @arg: ioctl argument
1501 *
1502 * Return: 0 on success, negative error code otherwise.
1503 */
1504long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
1505 struct dfl_feature *feature,
1506 unsigned long arg)
1507{
1508 return put_user(feature->nr_irqs, (__u32 __user *)arg);
1509}
1510EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
1511
1512/**
1513 * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
1514 * @pdev: the feature device which has the sub feature
1515 * @feature: the dfl sub feature
1516 * @arg: ioctl argument
1517 *
1518 * Return: 0 on success, negative error code otherwise.
1519 */
1520long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
1521 struct dfl_feature *feature,
1522 unsigned long arg)
1523{
1524 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
1525 struct dfl_fpga_irq_set hdr;
1526 s32 *fds;
1527 long ret;
1528
1529 if (!feature->nr_irqs)
1530 return -ENOENT;
1531
1532 if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
1533 return -EFAULT;
1534
1535 if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
1536 (hdr.start + hdr.count < hdr.start))
1537 return -EINVAL;
1538
1539 fds = memdup_user((void __user *)(arg + sizeof(hdr)),
1540 hdr.count * sizeof(s32));
1541 if (IS_ERR(fds))
1542 return PTR_ERR(fds);
1543
1544 mutex_lock(&pdata->lock);
1545 ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
1546 mutex_unlock(&pdata->lock);
1547
1548 kfree(fds);
1549 return ret;
1550}
1551EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
1552
/* Module teardown: release chardev regions, then destroy the id allocators. */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
}
1558
/* module entry/exit registration and metadata */
module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Device Feature List (DFL) Support
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Zhang Yi <yi.z.zhang@intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
12 */
13#include <linux/module.h>
14
15#include "dfl.h"
16
/* serializes allocation/free on the per-type feature-dev id idrs below */
static DEFINE_MUTEX(dfl_id_mutex);

/*
 * when adding a new feature dev support in DFL framework, it's required to
 * add a new item in enum dfl_id_type and provide related information in below
 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
 * platform device creation (define name strings in dfl.h, as they could be
 * reused by platform device drivers).
 *
 * if the new feature dev needs chardev support, then it's required to add
 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
 * index to dfl_chardevs table. If no chardev support just set devt_type
 * as one invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_id_type {
	FME_ID,		/* fme id allocation and mapping */
	PORT_ID,	/* port id allocation and mapping */
	DFL_ID_MAX,
};

enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

/*
 * per-type lockdep classes for the feature-dev pdata mutex; applied via
 * lockdep_set_class_and_name() in build_info_commit_dev().
 */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
49
50/**
51 * dfl_dev_info - dfl feature device information.
52 * @name: name string of the feature platform device.
53 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
54 * @id: idr id of the feature dev.
55 * @devt_type: index to dfl_chrdevs[].
56 */
57struct dfl_dev_info {
58 const char *name;
59 u32 dfh_id;
60 struct idr id;
61 enum dfl_fpga_devt_type devt_type;
62};
63
/* it is indexed by dfl_id_type: FME_ID entry first, PORT_ID entry second */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
71
72/**
73 * dfl_chardev_info - chardev information of dfl feature device
74 * @name: nmae string of the char device.
75 * @devt: devt of the char device.
76 */
77struct dfl_chardev_info {
78 const char *name;
79 dev_t devt;
80};
81
/* indexed by enum dfl_fpga_devt_type; .devt is filled by dfl_chardev_init() */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
87
88static void dfl_ids_init(void)
89{
90 int i;
91
92 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
93 idr_init(&dfl_devs[i].id);
94}
95
96static void dfl_ids_destroy(void)
97{
98 int i;
99
100 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
101 idr_destroy(&dfl_devs[i].id);
102}
103
104static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
105{
106 int id;
107
108 WARN_ON(type >= DFL_ID_MAX);
109 mutex_lock(&dfl_id_mutex);
110 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
111 mutex_unlock(&dfl_id_mutex);
112
113 return id;
114}
115
116static void dfl_id_free(enum dfl_id_type type, int id)
117{
118 WARN_ON(type >= DFL_ID_MAX);
119 mutex_lock(&dfl_id_mutex);
120 idr_remove(&dfl_devs[type].id, id);
121 mutex_unlock(&dfl_id_mutex);
122}
123
124static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
125{
126 int i;
127
128 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
129 if (!strcmp(dfl_devs[i].name, pdev->name))
130 return i;
131
132 return DFL_ID_MAX;
133}
134
135static enum dfl_id_type dfh_id_to_type(u32 id)
136{
137 int i;
138
139 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
140 if (dfl_devs[i].dfh_id == id)
141 return i;
142
143 return DFL_ID_MAX;
144}
145
146/*
147 * introduce a global port_ops list, it allows port drivers to register ops
148 * in such list, then other feature devices (e.g. FME), could use the port
149 * functions even related port platform device is hidden. Below is one example,
150 * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is
151 * enabled, port (and it's AFU) is turned into VF and port platform device
152 * is hidden from system but it's still required to access port to finish FPGA
153 * reconfiguration function in FME.
154 */
155
156static DEFINE_MUTEX(dfl_port_ops_mutex);
157static LIST_HEAD(dfl_port_ops_list);
158
159/**
160 * dfl_fpga_port_ops_get - get matched port ops from the global list
161 * @pdev: platform device to match with associated port ops.
162 * Return: matched port ops on success, NULL otherwise.
163 *
164 * Please note that must dfl_fpga_port_ops_put after use the port_ops.
165 */
166struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
167{
168 struct dfl_fpga_port_ops *ops = NULL;
169
170 mutex_lock(&dfl_port_ops_mutex);
171 if (list_empty(&dfl_port_ops_list))
172 goto done;
173
174 list_for_each_entry(ops, &dfl_port_ops_list, node) {
175 /* match port_ops using the name of platform device */
176 if (!strcmp(pdev->name, ops->name)) {
177 if (!try_module_get(ops->owner))
178 ops = NULL;
179 goto done;
180 }
181 }
182
183 ops = NULL;
184done:
185 mutex_unlock(&dfl_port_ops_mutex);
186 return ops;
187}
188EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
189
190/**
191 * dfl_fpga_port_ops_put - put port ops
192 * @ops: port ops.
193 */
194void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
195{
196 if (ops && ops->owner)
197 module_put(ops->owner);
198}
199EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
200
201/**
202 * dfl_fpga_port_ops_add - add port_ops to global list
203 * @ops: port ops to add.
204 */
205void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
206{
207 mutex_lock(&dfl_port_ops_mutex);
208 list_add_tail(&ops->node, &dfl_port_ops_list);
209 mutex_unlock(&dfl_port_ops_mutex);
210}
211EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
212
213/**
214 * dfl_fpga_port_ops_del - remove port_ops from global list
215 * @ops: port ops to del.
216 */
217void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
218{
219 mutex_lock(&dfl_port_ops_mutex);
220 list_del(&ops->node);
221 mutex_unlock(&dfl_port_ops_mutex);
222}
223EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
224
225/**
226 * dfl_fpga_check_port_id - check the port id
227 * @pdev: port platform device.
228 * @pport_id: port id to compare.
229 *
230 * Return: 1 if port device matches with given port id, otherwise 0.
231 */
232int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
233{
234 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
235 struct dfl_fpga_port_ops *port_ops;
236
237 if (pdata->id != FEATURE_DEV_ID_UNUSED)
238 return pdata->id == *(int *)pport_id;
239
240 port_ops = dfl_fpga_port_ops_get(pdev);
241 if (!port_ops || !port_ops->get_id)
242 return 0;
243
244 pdata->id = port_ops->get_id(pdev);
245 dfl_fpga_port_ops_put(port_ops);
246
247 return pdata->id == *(int *)pport_id;
248}
249EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
250
251/**
252 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
253 * @pdev: feature device.
254 */
255void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
256{
257 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
258 struct dfl_feature *feature;
259
260 dfl_fpga_dev_for_each_feature(pdata, feature)
261 if (feature->ops) {
262 if (feature->ops->uinit)
263 feature->ops->uinit(pdev, feature);
264 feature->ops = NULL;
265 }
266}
267EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
268
269static int dfl_feature_instance_init(struct platform_device *pdev,
270 struct dfl_feature_platform_data *pdata,
271 struct dfl_feature *feature,
272 struct dfl_feature_driver *drv)
273{
274 int ret = 0;
275
276 if (drv->ops->init) {
277 ret = drv->ops->init(pdev, feature);
278 if (ret)
279 return ret;
280 }
281
282 feature->ops = drv->ops;
283
284 return ret;
285}
286
287static bool dfl_feature_drv_match(struct dfl_feature *feature,
288 struct dfl_feature_driver *driver)
289{
290 const struct dfl_feature_id *ids = driver->id_table;
291
292 if (ids) {
293 while (ids->id) {
294 if (ids->id == feature->id)
295 return true;
296 ids++;
297 }
298 }
299 return false;
300}
301
302/**
303 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
304 * @pdev: feature device.
305 * @feature_drvs: drvs for sub features.
306 *
307 * This function will match sub features with given feature drvs list and
308 * use matched drv to init related sub feature.
309 *
310 * Return: 0 on success, negative error code otherwise.
311 */
312int dfl_fpga_dev_feature_init(struct platform_device *pdev,
313 struct dfl_feature_driver *feature_drvs)
314{
315 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
316 struct dfl_feature_driver *drv = feature_drvs;
317 struct dfl_feature *feature;
318 int ret;
319
320 while (drv->ops) {
321 dfl_fpga_dev_for_each_feature(pdata, feature) {
322 if (dfl_feature_drv_match(feature, drv)) {
323 ret = dfl_feature_instance_init(pdev, pdata,
324 feature, drv);
325 if (ret)
326 goto exit;
327 }
328 }
329 drv++;
330 }
331
332 return 0;
333exit:
334 dfl_fpga_dev_feature_uinit(pdev);
335 return ret;
336}
337EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
338
339static void dfl_chardev_uinit(void)
340{
341 int i;
342
343 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
344 if (MAJOR(dfl_chrdevs[i].devt)) {
345 unregister_chrdev_region(dfl_chrdevs[i].devt,
346 MINORMASK + 1);
347 dfl_chrdevs[i].devt = MKDEV(0, 0);
348 }
349}
350
351static int dfl_chardev_init(void)
352{
353 int i, ret;
354
355 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
356 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
357 MINORMASK + 1, dfl_chrdevs[i].name);
358 if (ret)
359 goto exit;
360 }
361
362 return 0;
363
364exit:
365 dfl_chardev_uinit();
366 return ret;
367}
368
369static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
370{
371 if (type >= DFL_FPGA_DEVT_MAX)
372 return 0;
373
374 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
375}
376
377/**
378 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
379 *
380 * @pdev: feature dev.
381 * @fops: file operations for feature dev's cdev.
382 * @owner: owning module/driver.
383 *
384 * Return: 0 on success, negative error code otherwise.
385 */
386int dfl_fpga_dev_ops_register(struct platform_device *pdev,
387 const struct file_operations *fops,
388 struct module *owner)
389{
390 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
391
392 cdev_init(&pdata->cdev, fops);
393 pdata->cdev.owner = owner;
394
395 /*
396 * set parent to the feature device so that its refcount is
397 * decreased after the last refcount of cdev is gone, that
398 * makes sure the feature device is valid during device
399 * file's life-cycle.
400 */
401 pdata->cdev.kobj.parent = &pdev->dev.kobj;
402
403 return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
404}
405EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
406
407/**
408 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
409 * @pdev: feature dev.
410 */
411void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
412{
413 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
414
415 cdev_del(&pdata->cdev);
416}
417EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
418
419/**
420 * struct build_feature_devs_info - info collected during feature dev build.
421 *
422 * @dev: device to enumerate.
423 * @cdev: the container device for all feature devices.
424 * @feature_dev: current feature device.
425 * @ioaddr: header register region address of feature device in enumeration.
426 * @sub_features: a sub features linked list for feature device in enumeration.
427 * @feature_num: number of sub features for feature device in enumeration.
428 */
429struct build_feature_devs_info {
430 struct device *dev;
431 struct dfl_fpga_cdev *cdev;
432 struct platform_device *feature_dev;
433 void __iomem *ioaddr;
434 struct list_head sub_features;
435 int feature_num;
436};
437
438/**
439 * struct dfl_feature_info - sub feature info collected during feature dev build
440 *
441 * @fid: id of this sub feature.
442 * @mmio_res: mmio resource of this sub feature.
443 * @ioaddr: mapped base address of mmio resource.
444 * @node: node in sub_features linked list.
445 */
446struct dfl_feature_info {
447 u64 fid;
448 struct resource mmio_res;
449 void __iomem *ioaddr;
450 struct list_head node;
451};
452
453static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
454 struct platform_device *port)
455{
456 struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
457
458 mutex_lock(&cdev->lock);
459 list_add(&pdata->node, &cdev->port_dev_list);
460 get_device(&pdata->dev->dev);
461 mutex_unlock(&cdev->lock);
462}
463
464/*
465 * register current feature device, it is called when we need to switch to
466 * another feature parsing or we have parsed all features on given device
467 * feature list.
468 */
469static int build_info_commit_dev(struct build_feature_devs_info *binfo)
470{
471 struct platform_device *fdev = binfo->feature_dev;
472 struct dfl_feature_platform_data *pdata;
473 struct dfl_feature_info *finfo, *p;
474 enum dfl_id_type type;
475 int ret, index = 0;
476
477 if (!fdev)
478 return 0;
479
480 type = feature_dev_id_type(fdev);
481 if (WARN_ON_ONCE(type >= DFL_ID_MAX))
482 return -EINVAL;
483
484 /*
485 * we do not need to care for the memory which is associated with
486 * the platform device. After calling platform_device_unregister(),
487 * it will be automatically freed by device's release() callback,
488 * platform_device_release().
489 */
490 pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
491 GFP_KERNEL);
492 if (!pdata)
493 return -ENOMEM;
494
495 pdata->dev = fdev;
496 pdata->num = binfo->feature_num;
497 pdata->dfl_cdev = binfo->cdev;
498 pdata->id = FEATURE_DEV_ID_UNUSED;
499 mutex_init(&pdata->lock);
500 lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
501 dfl_pdata_key_strings[type]);
502
503 /*
504 * the count should be initialized to 0 to make sure
505 *__fpga_port_enable() following __fpga_port_disable()
506 * works properly for port device.
507 * and it should always be 0 for fme device.
508 */
509 WARN_ON(pdata->disable_count);
510
511 fdev->dev.platform_data = pdata;
512
513 /* each sub feature has one MMIO resource */
514 fdev->num_resources = binfo->feature_num;
515 fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
516 GFP_KERNEL);
517 if (!fdev->resource)
518 return -ENOMEM;
519
520 /* fill features and resource information for feature dev */
521 list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
522 struct dfl_feature *feature = &pdata->features[index];
523
524 /* save resource information for each feature */
525 feature->id = finfo->fid;
526 feature->resource_index = index;
527 feature->ioaddr = finfo->ioaddr;
528 fdev->resource[index++] = finfo->mmio_res;
529
530 list_del(&finfo->node);
531 kfree(finfo);
532 }
533
534 ret = platform_device_add(binfo->feature_dev);
535 if (!ret) {
536 if (type == PORT_ID)
537 dfl_fpga_cdev_add_port_dev(binfo->cdev,
538 binfo->feature_dev);
539 else
540 binfo->cdev->fme_dev =
541 get_device(&binfo->feature_dev->dev);
542 /*
543 * reset it to avoid build_info_free() freeing their resource.
544 *
545 * The resource of successfully registered feature devices
546 * will be freed by platform_device_unregister(). See the
547 * comments in build_info_create_dev().
548 */
549 binfo->feature_dev = NULL;
550 }
551
552 return ret;
553}
554
/*
 * Commit any feature device currently under construction, then allocate a
 * fresh platform device of @type and make it the one being built in @binfo.
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type, void __iomem *ioaddr)
{
	struct platform_device *fdev;
	int ret;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/* we will create a new device, commit current device first */
	ret = build_info_commit_dev(binfo);
	if (ret)
		return ret;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	/* start collecting sub features for the new feature device */
	binfo->feature_dev = fdev;
	binfo->feature_num = 0;
	binfo->ioaddr = ioaddr;
	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
592
/*
 * Release a build info, reclaiming the device id and pending sub-feature
 * info of an uncommitted feature device, if one is still attached.
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* drops the allocation reference of an uncommitted feature dev */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
615
616static inline u32 feature_size(void __iomem *start)
617{
618 u64 v = readq(start + DFH);
619 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
620 /* workaround for private features with invalid size, use 4K instead */
621 return ofst ? ofst : 4096;
622}
623
624static u64 feature_id(void __iomem *start)
625{
626 u64 v = readq(start + DFH);
627 u16 id = FIELD_GET(DFH_ID, v);
628 u8 type = FIELD_GET(DFH_TYPE, v);
629
630 if (type == DFH_TYPE_FIU)
631 return FEATURE_ID_FIU_HEADER;
632 else if (type == DFH_TYPE_PRIVATE)
633 return id;
634 else if (type == DFH_TYPE_AFU)
635 return FEATURE_ID_AFU;
636
637 WARN_ON(1);
638 return 0;
639}
640
641/*
642 * when create sub feature instances, for private features, it doesn't need
643 * to provide resource size and feature id as they could be read from DFH
644 * register. For afu sub feature, its register region only contains user
645 * defined registers, so never trust any information from it, just use the
646 * resource size information provided by its parent FIU.
647 */
648static int
649create_feature_instance(struct build_feature_devs_info *binfo,
650 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
651 resource_size_t size, u64 fid)
652{
653 struct dfl_feature_info *finfo;
654
655 /* read feature size and id if inputs are invalid */
656 size = size ? size : feature_size(dfl->ioaddr + ofst);
657 fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
658
659 if (dfl->len - ofst < size)
660 return -EINVAL;
661
662 finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
663 if (!finfo)
664 return -ENOMEM;
665
666 finfo->fid = fid;
667 finfo->mmio_res.start = dfl->start + ofst;
668 finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
669 finfo->mmio_res.flags = IORESOURCE_MEM;
670 finfo->ioaddr = dfl->ioaddr + ofst;
671
672 list_add_tail(&finfo->node, &binfo->sub_features);
673 binfo->feature_num++;
674
675 return 0;
676}
677
678static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
679 struct dfl_fpga_enum_dfl *dfl,
680 resource_size_t ofst)
681{
682 u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
683 u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
684
685 WARN_ON(!size);
686
687 return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
688}
689
690static int parse_feature_afu(struct build_feature_devs_info *binfo,
691 struct dfl_fpga_enum_dfl *dfl,
692 resource_size_t ofst)
693{
694 if (!binfo->feature_dev) {
695 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
696 return -EINVAL;
697 }
698
699 switch (feature_dev_id_type(binfo->feature_dev)) {
700 case PORT_ID:
701 return parse_feature_port_afu(binfo, dfl, ofst);
702 default:
703 dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
704 binfo->feature_dev->name);
705 }
706
707 return 0;
708}
709
710static int parse_feature_fiu(struct build_feature_devs_info *binfo,
711 struct dfl_fpga_enum_dfl *dfl,
712 resource_size_t ofst)
713{
714 u32 id, offset;
715 u64 v;
716 int ret = 0;
717
718 v = readq(dfl->ioaddr + ofst + DFH);
719 id = FIELD_GET(DFH_ID, v);
720
721 /* create platform device for dfl feature dev */
722 ret = build_info_create_dev(binfo, dfh_id_to_type(id),
723 dfl->ioaddr + ofst);
724 if (ret)
725 return ret;
726
727 ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
728 if (ret)
729 return ret;
730 /*
731 * find and parse FIU's child AFU via its NEXT_AFU register.
732 * please note that only Port has valid NEXT_AFU pointer per spec.
733 */
734 v = readq(dfl->ioaddr + ofst + NEXT_AFU);
735
736 offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
737 if (offset)
738 return parse_feature_afu(binfo, dfl, ofst + offset);
739
740 dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
741
742 return ret;
743}
744
745static int parse_feature_private(struct build_feature_devs_info *binfo,
746 struct dfl_fpga_enum_dfl *dfl,
747 resource_size_t ofst)
748{
749 if (!binfo->feature_dev) {
750 dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
751 (unsigned long long)feature_id(dfl->ioaddr + ofst));
752 return -EINVAL;
753 }
754
755 return create_feature_instance(binfo, dfl, ofst, 0, 0);
756}
757
758/**
759 * parse_feature - parse a feature on given device feature list
760 *
761 * @binfo: build feature devices information.
762 * @dfl: device feature list to parse
763 * @ofst: offset to feature header on this device feature list
764 */
765static int parse_feature(struct build_feature_devs_info *binfo,
766 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
767{
768 u64 v;
769 u32 type;
770
771 v = readq(dfl->ioaddr + ofst + DFH);
772 type = FIELD_GET(DFH_TYPE, v);
773
774 switch (type) {
775 case DFH_TYPE_AFU:
776 return parse_feature_afu(binfo, dfl, ofst);
777 case DFH_TYPE_PRIVATE:
778 return parse_feature_private(binfo, dfl, ofst);
779 case DFH_TYPE_FIU:
780 return parse_feature_fiu(binfo, dfl, ofst);
781 default:
782 dev_info(binfo->dev,
783 "Feature Type %x is not supported.\n", type);
784 }
785
786 return 0;
787}
788
789static int parse_feature_list(struct build_feature_devs_info *binfo,
790 struct dfl_fpga_enum_dfl *dfl)
791{
792 void __iomem *start = dfl->ioaddr;
793 void __iomem *end = dfl->ioaddr + dfl->len;
794 int ret = 0;
795 u32 ofst = 0;
796 u64 v;
797
798 /* walk through the device feature list via DFH's next DFH pointer. */
799 for (; start < end; start += ofst) {
800 if (end - start < DFH_SIZE) {
801 dev_err(binfo->dev, "The region is too small to contain a feature.\n");
802 return -EINVAL;
803 }
804
805 ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
806 if (ret)
807 return ret;
808
809 v = readq(start + DFH);
810 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
811
812 /* stop parsing if EOL(End of List) is set or offset is 0 */
813 if ((v & DFH_EOL) || !ofst)
814 break;
815 }
816
817 /* commit current feature device when reach the end of list */
818 return build_info_commit_dev(binfo);
819}
820
821struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
822{
823 struct dfl_fpga_enum_info *info;
824
825 get_device(dev);
826
827 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
828 if (!info) {
829 put_device(dev);
830 return NULL;
831 }
832
833 info->dev = dev;
834 INIT_LIST_HEAD(&info->dfls);
835
836 return info;
837}
838EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
839
840void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
841{
842 struct dfl_fpga_enum_dfl *tmp, *dfl;
843 struct device *dev;
844
845 if (!info)
846 return;
847
848 dev = info->dev;
849
850 /* remove all device feature lists in the list. */
851 list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
852 list_del(&dfl->node);
853 devm_kfree(dev, dfl);
854 }
855
856 devm_kfree(dev, info);
857 put_device(dev);
858}
859EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
860
861/**
862 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
863 *
864 * @info: ptr to dfl_fpga_enum_info
865 * @start: mmio resource address of the device feature list.
866 * @len: mmio resource length of the device feature list.
867 * @ioaddr: mapped mmio resource address of the device feature list.
868 *
869 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
870 * function to add information of each DFL to common data structure for next
871 * step enumeration.
872 *
873 * Return: 0 on success, negative error code otherwise.
874 */
875int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
876 resource_size_t start, resource_size_t len,
877 void __iomem *ioaddr)
878{
879 struct dfl_fpga_enum_dfl *dfl;
880
881 dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
882 if (!dfl)
883 return -ENOMEM;
884
885 dfl->start = start;
886 dfl->len = len;
887 dfl->ioaddr = ioaddr;
888
889 list_add_tail(&dfl->node, &info->dfls);
890
891 return 0;
892}
893EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
894
895static int remove_feature_dev(struct device *dev, void *data)
896{
897 struct platform_device *pdev = to_platform_device(dev);
898 enum dfl_id_type type = feature_dev_id_type(pdev);
899 int id = pdev->id;
900
901 platform_device_unregister(pdev);
902
903 dfl_id_free(type, id);
904
905 return 0;
906}
907
/* remove all feature platform devices registered under the base region */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
912
913/**
914 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
915 * @info: information for enumeration.
916 *
917 * This function creates a container device (base FPGA region), enumerates
918 * feature devices based on the enumeration info and creates platform devices
919 * under the container device.
920 *
921 * Return: dfl_fpga_cdev struct on success, -errno on failure
922 */
923struct dfl_fpga_cdev *
924dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
925{
926 struct build_feature_devs_info *binfo;
927 struct dfl_fpga_enum_dfl *dfl;
928 struct dfl_fpga_cdev *cdev;
929 int ret = 0;
930
931 if (!info->dev)
932 return ERR_PTR(-ENODEV);
933
934 cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
935 if (!cdev)
936 return ERR_PTR(-ENOMEM);
937
938 cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
939 if (!cdev->region) {
940 ret = -ENOMEM;
941 goto free_cdev_exit;
942 }
943
944 cdev->parent = info->dev;
945 mutex_init(&cdev->lock);
946 INIT_LIST_HEAD(&cdev->port_dev_list);
947
948 ret = fpga_region_register(cdev->region);
949 if (ret)
950 goto free_cdev_exit;
951
952 /* create and init build info for enumeration */
953 binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
954 if (!binfo) {
955 ret = -ENOMEM;
956 goto unregister_region_exit;
957 }
958
959 binfo->dev = info->dev;
960 binfo->cdev = cdev;
961
962 /*
963 * start enumeration for all feature devices based on Device Feature
964 * Lists.
965 */
966 list_for_each_entry(dfl, &info->dfls, node) {
967 ret = parse_feature_list(binfo, dfl);
968 if (ret) {
969 remove_feature_devs(cdev);
970 build_info_free(binfo);
971 goto unregister_region_exit;
972 }
973 }
974
975 build_info_free(binfo);
976
977 return cdev;
978
979unregister_region_exit:
980 fpga_region_unregister(cdev->region);
981free_cdev_exit:
982 devm_kfree(info->dev, cdev);
983 return ERR_PTR(ret);
984}
985EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
986
987/**
988 * dfl_fpga_feature_devs_remove - remove all feature devices
989 * @cdev: fpga container device.
990 *
991 * Remove the container device and all feature devices under given container
992 * devices.
993 */
994void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
995{
996 struct dfl_feature_platform_data *pdata, *ptmp;
997
998 mutex_lock(&cdev->lock);
999 if (cdev->fme_dev)
1000 put_device(cdev->fme_dev);
1001
1002 list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
1003 struct platform_device *port_dev = pdata->dev;
1004
1005 /* remove released ports */
1006 if (!device_is_registered(&port_dev->dev)) {
1007 dfl_id_free(feature_dev_id_type(port_dev),
1008 port_dev->id);
1009 platform_device_put(port_dev);
1010 }
1011
1012 list_del(&pdata->node);
1013 put_device(&port_dev->dev);
1014 }
1015 mutex_unlock(&cdev->lock);
1016
1017 remove_feature_devs(cdev);
1018
1019 fpga_region_unregister(cdev->region);
1020 devm_kfree(cdev->parent, cdev);
1021}
1022EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
1023
1024/**
1025 * __dfl_fpga_cdev_find_port - find a port under given container device
1026 *
1027 * @cdev: container device
1028 * @data: data passed to match function
1029 * @match: match function used to find specific port from the port device list
1030 *
1031 * Find a port device under container device. This function needs to be
1032 * invoked with lock held.
1033 *
1034 * Return: pointer to port's platform device if successful, NULL otherwise.
1035 *
1036 * NOTE: you will need to drop the device reference with put_device() after use.
1037 */
1038struct platform_device *
1039__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1040 int (*match)(struct platform_device *, void *))
1041{
1042 struct dfl_feature_platform_data *pdata;
1043 struct platform_device *port_dev;
1044
1045 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1046 port_dev = pdata->dev;
1047
1048 if (match(port_dev, data) && get_device(&port_dev->dev))
1049 return port_dev;
1050 }
1051
1052 return NULL;
1053}
1054EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1055
1056static int __init dfl_fpga_init(void)
1057{
1058 int ret;
1059
1060 dfl_ids_init();
1061
1062 ret = dfl_chardev_init();
1063 if (ret)
1064 dfl_ids_destroy();
1065
1066 return ret;
1067}
1068
1069/**
1070 * dfl_fpga_cdev_release_port - release a port platform device
1071 *
1072 * @cdev: parent container device.
1073 * @port_id: id of the port platform device.
1074 *
1075 * This function allows user to release a port platform device. This is a
1076 * mandatory step before turn a port from PF into VF for SRIOV support.
1077 *
1078 * Return: 0 on success, negative error code otherwise.
1079 */
1080int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
1081{
1082 struct platform_device *port_pdev;
1083 int ret = -ENODEV;
1084
1085 mutex_lock(&cdev->lock);
1086 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1087 dfl_fpga_check_port_id);
1088 if (!port_pdev)
1089 goto unlock_exit;
1090
1091 if (!device_is_registered(&port_pdev->dev)) {
1092 ret = -EBUSY;
1093 goto put_dev_exit;
1094 }
1095
1096 ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
1097 if (ret)
1098 goto put_dev_exit;
1099
1100 platform_device_del(port_pdev);
1101 cdev->released_port_num++;
1102put_dev_exit:
1103 put_device(&port_pdev->dev);
1104unlock_exit:
1105 mutex_unlock(&cdev->lock);
1106 return ret;
1107}
1108EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1109
1110/**
1111 * dfl_fpga_cdev_assign_port - assign a port platform device back
1112 *
1113 * @cdev: parent container device.
1114 * @port_id: id of the port platform device.
1115 *
1116 * This function allows user to assign a port platform device back. This is
1117 * a mandatory step after disable SRIOV support.
1118 *
1119 * Return: 0 on success, negative error code otherwise.
1120 */
1121int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
1122{
1123 struct platform_device *port_pdev;
1124 int ret = -ENODEV;
1125
1126 mutex_lock(&cdev->lock);
1127 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1128 dfl_fpga_check_port_id);
1129 if (!port_pdev)
1130 goto unlock_exit;
1131
1132 if (device_is_registered(&port_pdev->dev)) {
1133 ret = -EBUSY;
1134 goto put_dev_exit;
1135 }
1136
1137 ret = platform_device_add(port_pdev);
1138 if (ret)
1139 goto put_dev_exit;
1140
1141 dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
1142 cdev->released_port_num--;
1143put_dev_exit:
1144 put_device(&port_pdev->dev);
1145unlock_exit:
1146 mutex_unlock(&cdev->lock);
1147 return ret;
1148}
1149EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1150
1151static void config_port_access_mode(struct device *fme_dev, int port_id,
1152 bool is_vf)
1153{
1154 void __iomem *base;
1155 u64 v;
1156
1157 base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
1158
1159 v = readq(base + FME_HDR_PORT_OFST(port_id));
1160
1161 v &= ~FME_PORT_OFST_ACC_CTRL;
1162 v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
1163 is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
1164
1165 writeq(v, base + FME_HDR_PORT_OFST(port_id));
1166}
1167
1168#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
1169#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1170
1171/**
1172 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1173 *
1174 * @cdev: parent container device.
1175 *
1176 * This function is needed in sriov configuration routine. It could be used to
1177 * configure the all released ports from VF access mode to PF.
1178 */
1179void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1180{
1181 struct dfl_feature_platform_data *pdata;
1182
1183 mutex_lock(&cdev->lock);
1184 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1185 if (device_is_registered(&pdata->dev->dev))
1186 continue;
1187
1188 config_port_pf_mode(cdev->fme_dev, pdata->id);
1189 }
1190 mutex_unlock(&cdev->lock);
1191}
1192EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1193
1194/**
1195 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1196 *
1197 * @cdev: parent container device.
1198 * @num_vfs: VF device number.
1199 *
1200 * This function is needed in sriov configuration routine. It could be used to
1201 * configure the released ports from PF access mode to VF.
1202 *
1203 * Return: 0 on success, negative error code otherwise.
1204 */
1205int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1206{
1207 struct dfl_feature_platform_data *pdata;
1208 int ret = 0;
1209
1210 mutex_lock(&cdev->lock);
1211 /*
1212 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
1213 * device, so if released port number doesn't match VF device number,
1214 * then reject the request with -EINVAL error code.
1215 */
1216 if (cdev->released_port_num != num_vfs) {
1217 ret = -EINVAL;
1218 goto done;
1219 }
1220
1221 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1222 if (device_is_registered(&pdata->dev->dev))
1223 continue;
1224
1225 config_port_vf_mode(cdev->fme_dev, pdata->id);
1226 }
1227done:
1228 mutex_unlock(&cdev->lock);
1229 return ret;
1230}
1231EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
1232
static void __exit dfl_fpga_exit(void)
{
	/* tear down in reverse order of dfl_fpga_init() */
	dfl_chardev_uinit();
	dfl_ids_destroy();
}
1238
1239module_init(dfl_fpga_init);
1240module_exit(dfl_fpga_exit);
1241
1242MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
1243MODULE_AUTHOR("Intel Corporation");
1244MODULE_LICENSE("GPL v2");