// SPDX-License-Identifier: GPL-2.0
/*
 * Endpoint Function Driver to implement Non-Transparent Bridge functionality
 * between PCI RC and EP
 *
 * Copyright (C) 2020 Texas Instruments
 * Copyright (C) 2022 NXP
 *
 * Based on pci-epf-ntb.c
 * Author: Frank Li <Frank.Li@nxp.com>
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

/*
 * +------------+         +---------------------------------------+
 * |            |         |                                       |
 * +------------+         |                        +--------------+
 * |    NTB     |         |                        |     NTB      |
 * |   NetDev   |         |                        |    NetDev    |
 * +------------+         |                        +--------------+
 * |    NTB     |         |                        |     NTB      |
 * |  Transfer  |         |                        |   Transfer   |
 * +------------+         |                        +--------------+
 * |            |         |                        |              |
 * |  PCI NTB   |         |                        |              |
 * |    EPF     |         |                        |              |
 * |   Driver   |         |                        | PCI Virtual  |
 * |            |         +---------------+        | NTB Driver   |
 * |            |         | PCI EP NTB    |<------>|              |
 * |            |         |  FN Driver    |        |              |
 * +------------+         +---------------+        +--------------+
 * |            |         |               |        |              |
 * |  PCI Bus   | <-----> |  PCI EP Bus   |        |  Virtual PCI |
 * |            |  PCI    |               |        |     Bus      |
 * +------------+         +---------------+--------+--------------+
 * PCIe Root Port                           PCI EP
 */


#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/ntb.h>

static struct workqueue_struct *kpcintb_workqueue;

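/*
 * Commands issued by the host through the control region, and the status
 * codes this function driver reports back.
 */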
#define COMMAND_CONFIGURE_DOORBELL	1
#define COMMAND_TEARDOWN_DOORBELL	2
#define COMMAND_CONFIGURE_MW		3
#define COMMAND_TEARDOWN_MW		4
#define COMMAND_LINK_UP			5
#define COMMAND_LINK_DOWN		6

#define COMMAND_STATUS_OK		1
#define COMMAND_STATUS_ERROR		2

#define LINK_STATUS_UP			BIT(0)

#define SPAD_COUNT			64
#define DB_COUNT			4
#define NTB_MW_OFFSET			2
#define DB_COUNT_MASK			GENMASK(15, 0)
#define MSIX_ENABLE			BIT(16)
#define MAX_DB_COUNT			32
#define MAX_MW				4

enum epf_ntb_bar {
	BAR_CONFIG,
	BAR_DB,
	BAR_MW0,
	BAR_MW1,
	BAR_MW2,
};

/*
 * +--------------------------------------------------+ Base
 * |                                                  |
 * |                                                  |
 * |                                                  |
 * |             Common Control Register              |
 * |                                                  |
 * |                                                  |
 * |                                                  |
 * +-----------------------+--------------------------+ Base+spad_offset
 * |                       |                          |
 * |    Peer Spad Space    |        Spad Space        |
 * |                       |                          |
 * |                       |                          |
 * +-----------------------+--------------------------+ Base+spad_offset
 * |                       |                          | +spad_count * 4
 * |                       |                          |
 * |       Spad Space      |     Peer Spad Space      |
 * |                       |                          |
 * +-----------------------+--------------------------+
 *       Virtual PCI                PCIe Endpoint
 *       NTB Driver                  NTB Driver
 */
struct epf_ntb_ctrl {
	u32 command;
	u32 argument;
	u16 command_status;
	u16 link_status;
	u32 topology;
	u64 addr;
	u64 size;
	u32 num_mws;
	u32 reserved;
	u32 spad_offset;
	u32 spad_count;
	u32 db_entry_size;
	u32 db_data[MAX_DB_COUNT];
	u32 db_offset[MAX_DB_COUNT];
} __packed;

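/*
 * Per-function state for the vNTB endpoint function: the ntb_dev registered
 * on the virtual PCI bus, the backing pci_epf, the configfs-provided
 * parameters (scratchpad/doorbell/memory-window sizes) and the resources
 * (BARs, doorbell memory, outbound windows) programmed into the endpoint
 * controller.
 */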
struct epf_ntb {
	struct ntb_dev ntb;
	struct pci_epf *epf;
	struct config_group group;

	u32 num_mws;
	u32 db_count;
	u32 spad_count;
	u64 mws_size[MAX_MW];
	u64 db;
	u32 vbus_number;
	u16 vntb_pid;
	u16 vntb_vid;

	bool linkup;
	u32 spad_size;

	enum pci_barno epf_ntb_bar[6];

	struct epf_ntb_ctrl *reg;

	u32 *epf_db;

	phys_addr_t vpci_mw_phy[MAX_MW];
	void __iomem *vpci_mw_addr[MAX_MW];

	struct delayed_work cmd_handler;
};

#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)

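/*
 * Default configuration space header for the endpoint function. The real
 * vendor/device IDs are normally set through the EPF core's configfs
 * attributes before the function is bound.
 */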
static struct pci_epf_header epf_ntb_header = {
	.vendorid = PCI_ANY_ID,
	.deviceid = PCI_ANY_ID,
	.baseclass_code = PCI_BASE_CLASS_MEMORY,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

/**
 * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @link_up: true or false indicating Link is UP or Down
 *
 * Once the NTB function in the HOST invokes ntb_link_enable(), this NTB
 * function driver will trigger a link event to the VHOST.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
	if (link_up)
		ntb->reg->link_status |= LINK_STATUS_UP;
	else
		ntb->reg->link_status &= ~LINK_STATUS_UP;

	ntb_link_event(&ntb->ntb);
	return 0;
}

/**
 * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
 * to access the memory window of HOST
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @mw: Index of the memory window (either 0, 1, 2 or 3)
 *
 *                          EP Outbound Window
 * +--------+              +-----------+
 * |        |              |           |
 * |        |              |           |
 * |        |              |           |
 * |        |              |           |
 * |        |              +-----------+
 * | Virtual|              | Memory Win|
 * |  NTB   | -----------> |           |
 * | Driver |              |           |
 * |        |              +-----------+
 * |        |              |           |
 * |        |              |           |
 * +--------+              +-----------+
 *  VHOST                     PCI EP
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
{
	phys_addr_t phys_addr;
	u8 func_no, vfunc_no;
	u64 addr, size;
	int ret = 0;

	phys_addr = ntb->vpci_mw_phy[mw];
	addr = ntb->reg->addr;
	size = ntb->reg->size;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
	if (ret)
		dev_err(&ntb->epf->epc->dev,
			"Failed to map memory window %d address\n", mw);
	return ret;
}

/**
 * epf_ntb_teardown_mw() - Teardown the configured OB ATU
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @mw: Index of the memory window (either 0, 1, 2 or 3)
 *
 * Teardown the OB ATU configured in epf_ntb_configure_mw() using
 * pci_epc_unmap_addr().
 */
static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
{
	pci_epc_unmap_addr(ntb->epf->epc,
			   ntb->epf->func_no,
			   ntb->epf->vfunc_no,
			   ntb->vpci_mw_phy[mw]);
}

/**
 * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
 * @work: work_struct embedded in struct epf_ntb
 *
 * Workqueue function that gets invoked periodically (once every 5ms) to
 * check whether any commands have been received from the NTB HOST. The
 * HOST can send commands to configure a doorbell, configure a memory
 * window or update the link status.
 */
static void epf_ntb_cmd_handler(struct work_struct *work)
{
	struct epf_ntb_ctrl *ctrl;
	u32 command, argument;
	struct epf_ntb *ntb;
	struct device *dev;
	int ret;
	int i;

	ntb = container_of(work, struct epf_ntb, cmd_handler.work);

	for (i = 1; i < ntb->db_count; i++) {
		if (ntb->epf_db[i]) {
			ntb->db |= 1 << (i - 1);
			ntb_db_event(&ntb->ntb, i);
			ntb->epf_db[i] = 0;
		}
	}

	ctrl = ntb->reg;
	command = ctrl->command;
	if (!command)
		goto reset_handler;
	argument = ctrl->argument;

	ctrl->command = 0;
	ctrl->argument = 0;

	dev = &ntb->epf->dev;

	switch (command) {
	case COMMAND_CONFIGURE_DOORBELL:
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_TEARDOWN_DOORBELL:
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_CONFIGURE_MW:
		ret = epf_ntb_configure_mw(ntb, argument);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_TEARDOWN_MW:
		epf_ntb_teardown_mw(ntb, argument);
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_LINK_UP:
		ntb->linkup = true;
		ret = epf_ntb_link_up(ntb, true);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		goto reset_handler;
	case COMMAND_LINK_DOWN:
		ntb->linkup = false;
		ret = epf_ntb_link_up(ntb, false);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		break;
	default:
		dev_err(dev, "UNKNOWN command: %d\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
			   msecs_to_jiffies(5));
}

/**
 * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Clear the BAR (BAR0 by default) which contains the HOST's config and
 * self scratchpad region (removes the inbound ATU configuration). While
 * BAR0 is the default self scratchpad BAR, an NTB could have other BARs
 * for self scratchpad (because of reserved BARs). This function gets the
 * exact BAR used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
 *
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR. Also note the VHOST's
 * peer scratchpad is the HOST's self scratchpad.
 *
 * Returns: void
 */
static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
{
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	epf_bar = &ntb->epf->bar[barno];

	pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
}

/**
 * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Map BAR0 of EP CONTROLLER which contains the VHOST's config and
 * self scratchpad region.
 *
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
{
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	u8 func_no, vfunc_no;
	struct device *dev;
	int ret;

	dev = &ntb->epf->dev;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;
	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	epf_bar = &ntb->epf->bar[barno];

	ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Config/Status/SPAD BAR set failed\n");
		return ret;
	}
	return 0;
}

/**
 * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
 * config + scratchpad region
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 */
static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
}

/**
 * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
 * region
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Allocate the Local Memory mentioned in the above diagram. The size of
 * the CONFIG REGION is sizeof(struct epf_ntb_ctrl) and the size of the
 * SCRATCHPAD REGION is obtained from the "spad_count" configfs entry.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
	size_t align;
	enum pci_barno barno;
	struct epf_ntb_ctrl *ctrl;
	u32 spad_size, ctrl_size;
	u64 size;
	struct pci_epf *epf = ntb->epf;
	struct device *dev = &epf->dev;
	u32 spad_count;
	void *base;
	int i;
	const struct pci_epc_features *epc_features =
		pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no);

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	size = epc_features->bar_fixed_size[barno];
	align = epc_features->align;

	if (!IS_ALIGNED(size, align))
		return -EINVAL;

	spad_count = ntb->spad_count;

	ctrl_size = sizeof(struct epf_ntb_ctrl);
	spad_size = 2 * spad_count * sizeof(u32);

	if (!align) {
		ctrl_size = roundup_pow_of_two(ctrl_size);
		spad_size = roundup_pow_of_two(spad_size);
	} else {
		ctrl_size = ALIGN(ctrl_size, align);
		spad_size = ALIGN(spad_size, align);
	}

	if (!size)
		size = ctrl_size + spad_size;
	else if (size < ctrl_size + spad_size)
		return -EINVAL;

	base = pci_epf_alloc_space(epf, size, barno, align, 0);
	if (!base) {
		dev_err(dev, "Config/Status/SPAD alloc region fail\n");
		return -ENOMEM;
	}

	ntb->reg = base;

	ctrl = ntb->reg;
	ctrl->spad_offset = ctrl_size;

	ctrl->spad_count = spad_count;
	ctrl->num_mws = ntb->num_mws;
	ntb->spad_size = spad_size;

	ctrl->db_entry_size = sizeof(u32);

	for (i = 0; i < ntb->db_count; i++) {
		ntb->reg->db_data[i] = 1 + i;
		ntb->reg->db_offset[i] = 0;
	}

	return 0;
}

/**
 * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Configure MSI/MSI-X capability for each interface with number of
 * interrupts equal to "db_count" configfs entry.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	struct device *dev;
	u32 db_count;
	int ret;

	dev = &ntb->epf->dev;

	epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);

	if (!(epc_features->msix_capable || epc_features->msi_capable)) {
		dev_err(dev, "MSI or MSI-X is required for doorbell\n");
		return -EINVAL;
	}

	db_count = ntb->db_count;
	if (db_count > MAX_DB_COUNT) {
		dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
		return -EINVAL;
	}

	ntb->db_count = db_count;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      16);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	return 0;
}

/**
 * epf_ntb_db_bar_init() - Configure Doorbell window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	u32 align;
	struct device *dev = &ntb->epf->dev;
	int ret;
	struct pci_epf_bar *epf_bar;
	void __iomem *mw_addr;
	enum pci_barno barno;
	size_t size = sizeof(u32) * ntb->db_count;

	epc_features = pci_epc_get_features(ntb->epf->epc,
					    ntb->epf->func_no,
					    ntb->epf->vfunc_no);
	align = epc_features->align;

	if (size < 128)
		size = 128;

	if (align)
		size = ALIGN(size, align);
	else
		size = roundup_pow_of_two(size);

	barno = ntb->epf_ntb_bar[BAR_DB];

	mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
	if (!mw_addr) {
		dev_err(dev, "Failed to allocate OB address\n");
		return -ENOMEM;
	}

	ntb->epf_db = mw_addr;

	epf_bar = &ntb->epf->bar[barno];

	ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Doorbell BAR set failed\n");
		goto err_alloc_peer_mem;
	}
	return ret;

err_alloc_peer_mem:
	pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
	return ret;
}

static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);

/**
 * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
 * allocated in peer's outbound address space
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 */
static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_DB];
	pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
}

/**
 * epf_ntb_mw_bar_init() - Configure Memory window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
{
	int ret = 0;
	int i;
	u64 size;
	enum pci_barno barno;
	struct device *dev = &ntb->epf->dev;

	for (i = 0; i < ntb->num_mws; i++) {
		size = ntb->mws_size[i];
		barno = ntb->epf_ntb_bar[BAR_MW0 + i];

		ntb->epf->bar[barno].barno = barno;
		ntb->epf->bar[barno].size = size;
		ntb->epf->bar[barno].addr = NULL;
		ntb->epf->bar[barno].phys_addr = 0;
		ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
				PCI_BASE_ADDRESS_MEM_TYPE_64 :
				PCI_BASE_ADDRESS_MEM_TYPE_32;

		ret = pci_epc_set_bar(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      &ntb->epf->bar[barno]);
		if (ret) {
			dev_err(dev, "MW set failed\n");
			goto err_alloc_mem;
		}

		/* Allocate EPC outbound memory windows to vpci vntb device */
		ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
							      &ntb->vpci_mw_phy[i],
							      size);
		if (!ntb->vpci_mw_addr[i]) {
			ret = -ENOMEM;
			dev_err(dev, "Failed to allocate source address\n");
			goto err_set_bar;
		}
	}

	return ret;

err_set_bar:
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
err_alloc_mem:
	epf_ntb_mw_bar_clear(ntb, i);
	return ret;
}

/**
 * epf_ntb_mw_bar_clear() - Clear Memory window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @num_mws: the number of Memory window BARs to be cleared
 */
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
{
	enum pci_barno barno;
	int i;

	for (i = 0; i < num_mws; i++) {
		barno = ntb->epf_ntb_bar[BAR_MW0 + i];
		pci_epc_clear_bar(ntb->epf->epc,
				  ntb->epf->func_no,
				  ntb->epf->vfunc_no,
				  &ntb->epf->bar[barno]);

		pci_epc_mem_free_addr(ntb->epf->epc,
				      ntb->vpci_mw_phy[i],
				      ntb->vpci_mw_addr[i],
				      ntb->mws_size[i]);
	}
}

/**
 * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Remove the endpoint function from the endpoint controller and drop the
 * reference to the EPC.
 */
static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
{
	pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
	pci_epc_put(ntb->epf->epc);
}

/**
 * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
 * constructs (scratchpad region, doorbell, memory window)
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	enum pci_barno barno;
	enum epf_ntb_bar bar;
	struct device *dev;
	u32 num_mws;
	int i;

	barno = BAR_0;
	num_mws = ntb->num_mws;
	dev = &ntb->epf->dev;
	epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);

	/* These are required BARs which are mandatory for NTB functionality */
	for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
		barno = pci_epc_get_next_free_bar(epc_features, barno);
		if (barno < 0) {
			dev_err(dev, "Failed to get NTB function BAR\n");
			return barno;
		}
		ntb->epf_ntb_bar[bar] = barno;
	}

	/* These are optional BARs which don't impact NTB functionality */
	for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
		barno = pci_epc_get_next_free_bar(epc_features, barno);
		if (barno < 0) {
			ntb->num_mws = i;
			dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
		}
		ntb->epf_ntb_bar[bar] = barno;
	}

	return 0;
}

/**
 * epf_ntb_epc_init() - Initialize NTB interface
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Wrapper to initialize a particular EPC interface and start the workqueue
 * to check for commands from HOST. This function will write to the
 * EP controller HW for configuring it.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_epc_init(struct epf_ntb *ntb)
{
	u8 func_no, vfunc_no;
	struct pci_epc *epc;
	struct pci_epf *epf;
	struct device *dev;
	int ret;

	epf = ntb->epf;
	dev = &epf->dev;
	epc = epf->epc;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = epf_ntb_config_sspad_bar_set(ntb);
	if (ret) {
		dev_err(dev, "Config/self SPAD BAR init failed\n");
		return ret;
	}

	ret = epf_ntb_configure_interrupt(ntb);
	if (ret) {
		dev_err(dev, "Interrupt configuration failed\n");
		goto err_config_interrupt;
	}

	ret = epf_ntb_db_bar_init(ntb);
	if (ret) {
		dev_err(dev, "DB BAR init failed\n");
		goto err_db_bar_init;
	}

	ret = epf_ntb_mw_bar_init(ntb);
	if (ret) {
		dev_err(dev, "MW BAR init failed\n");
		goto err_mw_bar_init;
	}

	if (vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			goto err_write_header;
		}
	}

	INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
	queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);

	return 0;

err_write_header:
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
err_mw_bar_init:
	epf_ntb_db_bar_clear(ntb);
err_db_bar_init:
err_config_interrupt:
	epf_ntb_config_sspad_bar_clear(ntb);

	return ret;
}

/**
 * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Wrapper to cleanup all NTB interfaces.
 */
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
	epf_ntb_db_bar_clear(ntb);
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
}

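/* configfs show()/store() helpers for the simple u32 attributes of struct epf_ntb */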
#define EPF_NTB_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
									\
	return sprintf(page, "%d\n", ntb->_name);			\
}

#define EPF_NTB_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	ntb->_name = val;						\
									\
	return len;							\
}

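/*
 * Per-memory-window size attributes; the window index is parsed from the
 * attribute name (mw1..mw4).
 */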
#define EPF_NTB_MW_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]);	\
}

#define EPF_NTB_MW_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
	u64 val;							\
	int ret;							\
									\
	ret = kstrtou64(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	ntb->mws_size[win_no - 1] = val;				\
									\
	return len;							\
}

static ssize_t epf_ntb_num_mws_store(struct config_item *item,
				     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct epf_ntb *ntb = to_epf_ntb(group);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret)
		return ret;

	if (val > MAX_MW)
		return -EINVAL;

	ntb->num_mws = val;

	return len;
}

EPF_NTB_R(spad_count)
EPF_NTB_W(spad_count)
EPF_NTB_R(db_count)
EPF_NTB_W(db_count)
EPF_NTB_R(num_mws)
EPF_NTB_R(vbus_number)
EPF_NTB_W(vbus_number)
EPF_NTB_R(vntb_pid)
EPF_NTB_W(vntb_pid)
EPF_NTB_R(vntb_vid)
EPF_NTB_W(vntb_vid)
EPF_NTB_MW_R(mw1)
EPF_NTB_MW_W(mw1)
EPF_NTB_MW_R(mw2)
EPF_NTB_MW_W(mw2)
EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)

CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
CONFIGFS_ATTR(epf_ntb_, num_mws);
CONFIGFS_ATTR(epf_ntb_, mw1);
CONFIGFS_ATTR(epf_ntb_, mw2);
CONFIGFS_ATTR(epf_ntb_, mw3);
CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);

static struct configfs_attribute *epf_ntb_attrs[] = {
	&epf_ntb_attr_spad_count,
	&epf_ntb_attr_db_count,
	&epf_ntb_attr_num_mws,
	&epf_ntb_attr_mw1,
	&epf_ntb_attr_mw2,
	&epf_ntb_attr_mw3,
	&epf_ntb_attr_mw4,
	&epf_ntb_attr_vbus_number,
	&epf_ntb_attr_vntb_pid,
	&epf_ntb_attr_vntb_vid,
	NULL,
};

static const struct config_item_type ntb_group_type = {
	.ct_attrs = epf_ntb_attrs,
	.ct_owner = THIS_MODULE,
};

/**
 * epf_ntb_add_cfs() - Add configfs directory specific to NTB
 * @epf: NTB endpoint function device
 * @group: A pointer to the config_group structure referencing a group of
 * config_items of a specific type that belong to a specific sub-system.
 *
 * Add configfs directory specific to NTB. This directory will hold
 * NTB specific properties like db_count, spad_count, num_mws, etc.
 *
 * Returns: Pointer to config_group
 */
static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
					    struct config_group *group)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct config_group *ntb_group = &ntb->group;
	struct device *dev = &epf->dev;

	config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);

	return ntb_group;
}
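
/*
 * Typical configfs usage (illustrative only; the exact directory, function
 * and controller names depend on the platform and on how the function
 * device is created):
 *
 *   cd /sys/kernel/config/pci_ep
 *   mkdir functions/pci_epf_vntb/func1
 *   echo 32 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
 *   echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
 *   echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
 *   echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
 *   ln -s functions/pci_epf_vntb/func1 controllers/<epc name>/
 */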

/*==== virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/

static u32 pci_space[] = {
	0xffffffff,	/* Device ID, Vendor ID */
	0,		/* Status, Command */
	0xffffffff,	/* Base Class, Subclass, Prog Intf, Revision ID */
	0x40,		/* BIST, Header Type, Latency Timer, Cache Line Size */
	0,		/* BAR 0 */
	0,		/* BAR 1 */
	0,		/* BAR 2 */
	0,		/* BAR 3 */
	0,		/* BAR 4 */
	0,		/* BAR 5 */
	0,		/* Cardbus CIS Pointer */
	0,		/* Subsystem ID, Subsystem Vendor ID */
	0,		/* ROM Base Address */
	0,		/* Reserved, Capabilities Pointer */
	0,		/* Reserved */
	0,		/* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
};

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
{
	if (devfn == 0) {
		memcpy(val, ((u8 *)pci_space) + where, size);
		return PCIBIOS_SUCCESSFUL;
	}
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
{
	return 0;
}

static struct pci_ops vpci_ops = {
	.read = pci_read,
	.write = pci_write,
};


static int vpci_scan_bus(void *sysdata)
{
	struct pci_bus *vpci_bus;
	struct epf_ntb *ndev = sysdata;

	vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
	if (!vpci_bus) {
		pr_err("create pci bus failed\n");
		return -EINVAL;
	}

	pci_bus_add_devices(vpci_bus);

	return 0;
}

/*==================== Virtual PCIe NTB driver ==========================*/

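/* ntb_dev_ops backing the ntb_dev that pci_vntb_probe() registers on the virtual PCI bus */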
static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct epf_ntb *ndev = ntb_ndev(ntb);

	return ndev->num_mws;
}

static int vntb_epf_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->num_mws;
}

static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
	return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
}

static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return 0;
}

static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
				 dma_addr_t addr, resource_size_t size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	int ret;
	struct device *dev;

	dev = &ntb->ntb.dev;
	barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
	epf_bar = &ntb->epf->bar[barno];
	epf_bar->phys_addr = addr;
	epf_bar->barno = barno;
	epf_bar->size = size;

	ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
	if (ret) {
		dev_err(dev, "failed to set memory window translation\n");
		return ret;
	}
	return 0;
}

static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
	return 0;
}

static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
				     phys_addr_t *base, resource_size_t *size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (base)
		*base = ntb->vpci_mw_phy[idx];

	if (size)
		*size = ntb->mws_size[idx];

	return 0;
}

static int vntb_epf_link_enable(struct ntb_dev *ntb,
				enum ntb_speed max_speed,
				enum ntb_width max_width)
{
	return 0;
}

static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32);
	u32 val;
	void __iomem *base = (void __iomem *)ntb->reg;

	val = readl(base + off + ct + idx * sizeof(u32));
	return val;
}

static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32);
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + ct + idx * sizeof(u32));
	return 0;
}

static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset;
	void __iomem *base = (void __iomem *)ntb->reg;
	u32 val;

	val = readl(base + off + idx * sizeof(u32));
	return val;
}

static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset;
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + idx * sizeof(u32));
	return 0;
}

static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
{
	u32 interrupt_num = ffs(db_bits) + 1;
	struct epf_ntb *ntb = ntb_ndev(ndev);
	u8 func_no, vfunc_no;
	int ret;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
				PCI_IRQ_MSI, interrupt_num + 1);
	if (ret)
		dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");

	return ret;
}

static u64 vntb_epf_db_read(struct ntb_dev *ndev)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->db;
}

static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
				 resource_size_t *addr_align,
				 resource_size_t *size_align,
				 resource_size_t *size_max)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = ntb->mws_size[idx];

	return 0;
}

static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
			       enum ntb_speed *speed,
			       enum ntb_width *width)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->reg->link_status;
}

static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
{
	return 0;
}

static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	ntb->db &= ~db_bits;
	return 0;
}

static int vntb_epf_link_disable(struct ntb_dev *ntb)
{
	return 0;
}

static const struct ntb_dev_ops vntb_epf_ops = {
	.mw_count = vntb_epf_mw_count,
	.spad_count = vntb_epf_spad_count,
	.peer_mw_count = vntb_epf_peer_mw_count,
	.db_valid_mask = vntb_epf_db_valid_mask,
	.db_set_mask = vntb_epf_db_set_mask,
	.mw_set_trans = vntb_epf_mw_set_trans,
	.mw_clear_trans = vntb_epf_mw_clear_trans,
	.peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
	.link_enable = vntb_epf_link_enable,
	.spad_read = vntb_epf_spad_read,
	.spad_write = vntb_epf_spad_write,
	.peer_spad_read = vntb_epf_peer_spad_read,
	.peer_spad_write = vntb_epf_peer_spad_write,
	.peer_db_set = vntb_epf_peer_db_set,
	.db_read = vntb_epf_db_read,
	.mw_get_align = vntb_epf_mw_get_align,
	.link_is_up = vntb_epf_link_is_up,
	.db_clear_mask = vntb_epf_db_clear_mask,
	.db_clear = vntb_epf_db_clear,
	.link_disable = vntb_epf_link_disable,
};

static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
	struct device *dev = &pdev->dev;

	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &vntb_epf_ops;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	ret = ntb_register_device(&ndev->ntb);
	if (ret) {
		dev_err(dev, "Failed to register NTB device\n");
		goto err_register_dev;
	}

	dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
	return 0;

err_register_dev:
	put_device(&ndev->ntb.dev);
	return -EINVAL;
}

static struct pci_device_id pci_vntb_table[] = {
	{
		PCI_DEVICE(0xffff, 0xffff),
	},
	{},
};

static struct pci_driver vntb_pci_driver = {
	.name = "pci-vntb",
	.id_table = pci_vntb_table,
	.probe = pci_vntb_probe,
};


/* ============ PCIe EPF Driver Bind ====================*/

/**
 * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
 * @epf: NTB endpoint function device
 *
 * Initialize the endpoint controller associated with the NTB function
 * device. Invoked when the endpoint function is bound to an EPC device.
 * If the primary EPC interface is not yet bound, this function returns
 * without doing anything and the initialization happens once it is.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_bind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	int ret;

	if (!epf->epc) {
		dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
		return 0;
	}

	ret = epf_ntb_init_epc_bar(ntb);
	if (ret) {
		dev_err(dev, "Failed to create NTB EPC\n");
		goto err_bar_init;
	}

	ret = epf_ntb_config_spad_bar_alloc(ntb);
	if (ret) {
		dev_err(dev, "Failed to allocate BAR memory\n");
		goto err_bar_alloc;
	}

	ret = epf_ntb_epc_init(ntb);
	if (ret) {
		dev_err(dev, "Failed to initialize EPC\n");
		goto err_bar_alloc;
	}

	epf_set_drvdata(epf, ntb);

	pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
	pci_vntb_table[0].vendor = ntb->vntb_vid;
	pci_vntb_table[0].device = ntb->vntb_pid;

	ret = pci_register_driver(&vntb_pci_driver);
	if (ret) {
		dev_err(dev, "failed to register vntb pci driver\n");
		goto err_bar_alloc;
	}

	vpci_scan_bus(ntb);

	return 0;

err_bar_alloc:
	epf_ntb_config_spad_bar_free(ntb);

err_bar_init:
	epf_ntb_epc_destroy(ntb);

	return ret;
}

/**
 * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
 * @epf: NTB endpoint function device
 *
 * Cleanup the initialization from epf_ntb_bind()
 */
static void epf_ntb_unbind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);

	epf_ntb_epc_cleanup(ntb);
	epf_ntb_config_spad_bar_free(ntb);
	epf_ntb_epc_destroy(ntb);

	pci_unregister_driver(&vntb_pci_driver);
}

/* EPF driver probe */
static const struct pci_epf_ops epf_ntb_ops = {
	.bind = epf_ntb_bind,
	.unbind = epf_ntb_unbind,
	.add_cfs = epf_ntb_add_cfs,
};

/**
 * epf_ntb_probe() - Probe NTB function driver
 * @epf: NTB endpoint function device
 * @id: NTB endpoint function device ID
 *
 * Probe NTB function driver when the endpoint function bus detects an NTB
 * endpoint function.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_probe(struct pci_epf *epf,
			 const struct pci_epf_device_id *id)
{
	struct epf_ntb *ntb;
	struct device *dev;

	dev = &epf->dev;

	ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
	if (!ntb)
		return -ENOMEM;

	epf->header = &epf_ntb_header;
	ntb->epf = epf;
	ntb->vbus_number = 0xff;
	epf_set_drvdata(epf, ntb);

	dev_info(dev, "pci-ep epf driver loaded\n");
	return 0;
}

static const struct pci_epf_device_id epf_ntb_ids[] = {
	{
		.name = "pci_epf_vntb",
	},
	{},
};

static struct pci_epf_driver epf_ntb_driver = {
	.driver.name = "pci_epf_vntb",
	.probe = epf_ntb_probe,
	.id_table = epf_ntb_ids,
	.ops = &epf_ntb_ops,
	.owner = THIS_MODULE,
};

static int __init epf_ntb_init(void)
{
	int ret;

	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
					    WQ_HIGHPRI, 0);
	ret = pci_epf_register_driver(&epf_ntb_driver);
	if (ret) {
		destroy_workqueue(kpcintb_workqueue);
		pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(epf_ntb_init);

static void __exit epf_ntb_exit(void)
{
	pci_epf_unregister_driver(&epf_ntb_driver);
	destroy_workqueue(kpcintb_workqueue);
}
module_exit(epf_ntb_exit);

MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
MODULE_LICENSE("GPL v2");