// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static struct class *pci_epc_class;

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * Release the refcount the caller obtained by invoking pci_epc_get().
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (!epc || IS_ERR(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get the struct pci_epc * corresponding to the device name of the
 * endpoint controller.
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
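
/*
 * Example (illustrative sketch, not part of this library): an EPF driver or
 * other kernel code can look up an EPC by its device name and must drop the
 * reference with pci_epc_put() when done.  The controller name used below is
 * hypothetical; the block is compiled out because it is purely illustrative.
 */
#if 0
static int example_use_epc(void)
{
	struct pci_epc *epc;

	epc = pci_epc_get("4a800000.pcie-ep");	/* hypothetical device name */
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	/* ... configure endpoint functions using the pci_epc_* APIs ... */

	pci_epc_put(epc);
	return 0;
}
#endif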

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.  Returns NO_BAR if there is no unreserved BAR available.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * by the endpoint function.  Returns NO_BAR if there is no unreserved BAR
 * available.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	unsigned long free_bar;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
		bar++;

	/* Find if the reserved BAR is also a 64-bit BAR */
	free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;

	/* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
	free_bar <<= 1;
	free_bar |= epc_features->reserved_bar;

	/* Find the first zero bit (i.e. the first free BAR) from @bar onwards */
	free_bar = find_next_zero_bit(&free_bar, 6, bar);
	if (free_bar > 5)
		return NO_BAR;

	return free_bar;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
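
/*
 * Example (illustrative sketch): walking every BAR that is neither reserved
 * nor shadowed by a preceding 64-bit BAR.  'features' would typically come
 * from pci_epc_get_features(); the helper below is hypothetical and compiled
 * out.
 */
#if 0
static void example_list_free_bars(const struct pci_epc_features *features)
{
	enum pci_barno bar = BAR_0;

	while (bar <= BAR_5) {
		bar = pci_epc_get_next_free_bar(features, bar);
		if (bar == NO_BAR)
			break;

		pr_info("BAR%d is free\n", bar);
		bar++;	/* continue the search after the BAR just found */
	}
}
#endif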

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no)
{
	const struct pci_epc_features *epc_features;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
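
/*
 * Example (illustrative sketch): an EPF driver usually queries the controller
 * features before configuring BARs or interrupts.  The field names used here
 * (msi_capable, reserved_bar) come from struct pci_epc_features in
 * <linux/pci-epc.h>; the function itself is hypothetical and compiled out.
 */
#if 0
static int example_check_features(struct pci_epc *epc, u8 func_no)
{
	const struct pci_epc_features *features;
	enum pci_barno test_bar;

	features = pci_epc_get_features(epc, func_no);
	if (!features)
		return -EOPNOTSUPP;

	if (!features->msi_capable)
		pr_info("controller cannot raise MSI for function %u\n", func_no);

	/* Pick a BAR that the controller has not reserved for itself */
	test_bar = pci_epc_get_first_free_bar(features);
	if (test_bar == NO_BAR)
		return -ENOMEM;

	return 0;
}
#endif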

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the EPC device whose link has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the EPC device whose link has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);
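
/*
 * Example (illustrative sketch): user space normally starts the link through
 * the endpoint configfs 'start' attribute after binding an EPF; code doing
 * the same by hand would look roughly like this.  'example_epc' is a
 * hypothetical pointer and the block is compiled out.
 */
#if 0
static int example_bring_up_link(struct pci_epc *example_epc)
{
	int ret;

	ret = pci_epc_start(example_epc);	/* establish the link with the host */
	if (ret)
		return ret;

	/* ... service the host ... */

	pci_epc_stop(example_epc);		/* tear the link down again */
	return 0;
}
#endif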

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the endpoint function number in the EPC device
 * @type: specify the type of interrupt; legacy, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number
 *
 * Invoke to raise a legacy, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
		      enum pci_epc_irq_type type, u16 interrupt_num)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
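
/*
 * Example (illustrative sketch): an EPF driver signalling the host after it
 * has finished a transfer, falling back to a legacy interrupt when the
 * controller reports no MSI support.  The function and variable names are
 * hypothetical and the block is compiled out.
 */
#if 0
static void example_notify_host(struct pci_epc *epc, u8 func_no, u16 msi_num)
{
	const struct pci_epc_features *features;

	features = pci_epc_get_features(epc, func_no);
	if (features && features->msi_capable)
		pci_epc_raise_irq(epc, func_no, PCI_EPC_IRQ_MSI, msi_num);
	else
		pci_epc_raise_irq(epc, func_no, PCI_EPC_IRQ_LEGACY, 0);
}
#endif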

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *			   MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number
 * @entry_size: Size of Outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *	      with interrupt number as 'interrupt_num'
 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 *		     to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
			u8 interrupt_num, u32 entry_size, u32 *msi_data,
			u32 *msi_addr_offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
				    entry_size, msi_data, msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
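
/*
 * Example (illustrative sketch of the NTB doorbell flow described above):
 * allocate an outbound window, map it to the peer's MSI address, then ring
 * the doorbell by writing the returned MSI data at the returned offset.
 * Error paths are trimmed, all names are hypothetical and the block is
 * compiled out.
 */
#if 0
static int example_ring_doorbell(struct pci_epc *epc, u8 func_no, u8 irq)
{
	void __iomem *base;
	phys_addr_t phys;
	u32 db_data, db_offset;
	int ret;

	base = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!base)
		return -ENOMEM;

	ret = pci_epc_map_msi_irq(epc, func_no, phys, irq, SZ_4K,
				  &db_data, &db_offset);
	if (ret) {
		pci_epc_mem_free_addr(epc, phys, base, SZ_4K);
		return ret;
	}

	writel(db_data, base + db_offset);	/* raises MSI 'irq' on the peer */

	pci_epc_mem_free_addr(epc, phys, base, SZ_4K);
	return 0;
}
#endif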

/**
 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the endpoint function number in the EPC device
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	/* The MSI capability encodes the vector count as a power of two */
	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the endpoint function number in the EPC device
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts > 32)
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	/* Round up to the power-of-two encoding used by the MSI capability */
	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
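
/*
 * Example (illustrative sketch): during bind, an EPF driver advertises how
 * many MSI vectors it wants; after the host has enabled MSI it can read back
 * how many vectors were actually granted.  Names are hypothetical and the
 * block is compiled out.
 */
#if 0
static int example_configure_msi(struct pci_epc *epc, u8 func_no)
{
	int ret, granted;

	ret = pci_epc_set_msi(epc, func_no, 16);	/* request 16 vectors */
	if (ret)
		return ret;

	/* Later, e.g. after link-up, see what the host actually enabled */
	granted = pci_epc_get_msi(epc, func_no);
	if (!granted)
		return -EINVAL;

	pr_info("host enabled %d MSI vector(s)\n", granted);
	return 0;
}
#endif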

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the endpoint function number in the EPC device
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	/* The MSI-X capability stores the table size as N - 1 */
	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the endpoint function number in the EPC device
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of the MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
		     enum pci_barno bir, u32 offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
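
/*
 * Example (illustrative sketch): requesting 8 MSI-X vectors with the MSI-X
 * table placed at the start of BAR 2.  The caller is assumed to have set up
 * BAR 2 large enough to hold the table and PBA; the helper is hypothetical
 * and compiled out.
 */
#if 0
static int example_configure_msix(struct pci_epc *epc, u8 func_no)
{
	return pci_epc_set_msix(epc, func_no, 8, BAR_2, 0);
}
#endif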

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the endpoint function number in the EPC device
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
			phys_addr_t phys_addr)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the endpoint function number in the EPC device
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map a CPU address to a PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
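
/*
 * Example (illustrative sketch): the usual way to reach host memory from the
 * endpoint is to allocate a window from the EPC's outbound address space,
 * map it to the host-side PCI address and access it with MMIO accessors,
 * much as the pci_epf_test driver does.  'host_addr' and the helper name are
 * hypothetical, error handling is trimmed, and the block is compiled out.
 */
#if 0
static int example_write_to_host(struct pci_epc *epc, u8 func_no,
				 u64 host_addr, const void *buf, size_t len)
{
	void __iomem *window;
	phys_addr_t phys;
	int ret;

	window = pci_epc_mem_alloc_addr(epc, &phys, len);
	if (!window)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, func_no, phys, host_addr, len);
	if (ret)
		goto free_window;

	memcpy_toio(window, buf, len);		/* data lands in host memory */

	pci_epc_unmap_addr(epc, func_no, phys);
free_window:
	pci_epc_mem_free_addr(epc, phys, window, len);
	return ret;
}
#endif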

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the endpoint function number in the EPC device
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
		       struct pci_epf_bar *epf_bar)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the endpoint function number in the EPC device
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	/*
	 * Reject invalid BAR configurations: a 64-bit BAR in BAR 5 (it would
	 * need a non-existent BAR 6), an I/O BAR with stray address bits set
	 * in the flags, or a BAR larger than 4 GiB that is not marked 64-bit.
	 */
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
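
/*
 * Example (illustrative sketch): an EPF driver normally lets
 * pci_epf_alloc_space() fill in epf->bar[] and then hands that description to
 * the controller.  Here the backing buffer is assumed to have been allocated
 * already; only the pci_epc_set_bar()/pci_epc_clear_bar() calls are the point
 * of the sketch, which is compiled out.
 */
#if 0
static int example_expose_bar(struct pci_epf *epf, enum pci_barno barno)
{
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar = &epf->bar[barno];
	int ret;

	/* epf_bar->phys_addr, ->size and ->flags were filled in at alloc time */
	ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
	if (ret)
		return ret;

	/* ... later, on unbind ... */
	pci_epc_clear_bar(epc, epf->func_no, epf_bar);
	return 0;
}
#endif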

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the endpoint function number in the EPC device
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
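
/*
 * Example (illustrative sketch): a minimal configuration-space identity for an
 * endpoint function.  The vendor/device IDs below are placeholders, not real
 * assignments; struct pci_epf_header comes from <linux/pci-epf.h> and the
 * block is compiled out.
 */
#if 0
static int example_write_header(struct pci_epc *epc, u8 func_no)
{
	static struct pci_epf_header header = {
		.vendorid	= 0x104c,	/* placeholder vendor ID */
		.deviceid	= 0xb500,	/* placeholder device ID */
		.baseclass_code	= PCI_CLASS_OTHERS,
		.interrupt_pin	= PCI_INTERRUPT_INTA,
	};

	return pci_epc_write_header(epc, func_no, &header);
}
#endif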

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (!epc || IS_ERR(epc) || !epf)
		return;

	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
	}

	mutex_lock(&epc->lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	/* Clear the back-pointer that pci_epc_add_epf() set for this interface */
	if (type == PRIMARY_INTERFACE)
		epf->epc = NULL;
	else
		epf->sec_epc = NULL;
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	if (!epc || IS_ERR(epc))
		return;

	atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	if (!epc || IS_ERR(epc))
		return;

	atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
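
/*
 * Example (illustrative sketch): the producer side of these notifications is
 * the controller driver, which calls pci_epc_linkup() from its link-up
 * interrupt handler; the consumer side is an EPF driver that registers a
 * notifier block on epc->notifier (the pci_epc_register_notifier() helper in
 * <linux/pci-epc.h> is assumed here).  All names are hypothetical and the
 * block is compiled out.
 */
#if 0
static int example_epf_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	switch (val) {
	case CORE_INIT:
		/* (re)program header, BARs and interrupts */
		break;
	case LINK_UP:
		/* start queueing work towards the host */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_epf_notifier,
};

static void example_register(struct pci_epc *epc)
{
	pci_epc_register_notifier(epc, &example_nb);
}
#endif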

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);
	kfree(epc);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	INIT_LIST_HEAD(&epc->pci_epf);
	ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->ops = ops;

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);
	kfree(epc);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * In addition, it associates the EPC device with @dev using devres; on driver
 * detach, the release function is invoked on the devres data and the devres
 * data is then freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
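
/*
 * Example (illustrative sketch): the skeleton of an EPC driver probe.  Drivers
 * normally use the devm_pci_epc_create() wrapper from <linux/pci-epc.h>, which
 * passes THIS_MODULE for @owner.  The callback implementations and the
 * max_functions value are hypothetical placeholders; the block is compiled
 * out.
 */
#if 0
static const struct pci_epc_ops example_epc_ops = {
	.write_header	= example_epc_write_header,
	.set_bar	= example_epc_set_bar,
	.clear_bar	= example_epc_clear_bar,
	.map_addr	= example_epc_map_addr,
	.unmap_addr	= example_epc_unmap_addr,
	.raise_irq	= example_epc_raise_irq,
	.start		= example_epc_start,
	.owner		= THIS_MODULE,
};

static int example_epc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_epc *epc;

	epc = devm_pci_epc_create(dev, &example_epc_ops);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	epc->max_functions = 1;

	return 0;
}
#endif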

static int __init pci_epc_init(void)
{
	pci_epc_class = class_create(THIS_MODULE, "pci_epc");
	if (IS_ERR(pci_epc_class)) {
		pr_err("failed to create pci epc class --> %ld\n",
		       PTR_ERR(pci_epc_class));
		return PTR_ERR(pci_epc_class);
	}

	return 0;
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_destroy(pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");