Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * PCI Endpoint *Controller* (EPC) library
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/slab.h>
 11#include <linux/module.h>
 12#include <linux/of_device.h>
 13
 14#include <linux/pci-epc.h>
 15#include <linux/pci-epf.h>
 16#include <linux/pci-ep-cfs.h>
 17
 18static struct class *pci_epc_class;
 19
 20static void devm_pci_epc_release(struct device *dev, void *res)
 21{
 22	struct pci_epc *epc = *(struct pci_epc **)res;
 23
 24	pci_epc_destroy(epc);
 25}
 26
 27static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
 28{
 29	struct pci_epc **epc = res;
 30
 31	return *epc == match_data;
 32}
 33
 34/**
 35 * pci_epc_put() - release the PCI endpoint controller
 36 * @epc: epc returned by pci_epc_get()
 37 *
 38 * release the refcount the caller obtained by invoking pci_epc_get()
 39 */
 40void pci_epc_put(struct pci_epc *epc)
 41{
 42	if (!epc || IS_ERR(epc))
 43		return;
 44
 45	module_put(epc->ops->owner);
 46	put_device(&epc->dev);
 47}
 48EXPORT_SYMBOL_GPL(pci_epc_put);
 49
 50/**
 51 * pci_epc_get() - get the PCI endpoint controller
 52 * @epc_name: device name of the endpoint controller
 53 *
 54 * Invoke to get struct pci_epc * corresponding to the device name of the
 55 * endpoint controller
 56 */
 57struct pci_epc *pci_epc_get(const char *epc_name)
 58{
 59	int ret = -EINVAL;
 60	struct pci_epc *epc;
 61	struct device *dev;
 62	struct class_dev_iter iter;
 63
 64	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
 65	while ((dev = class_dev_iter_next(&iter))) {
 66		if (strcmp(epc_name, dev_name(dev)))
 67			continue;
 68
 69		epc = to_pci_epc(dev);
 70		if (!try_module_get(epc->ops->owner)) {
 71			ret = -EINVAL;
 72			goto err;
 73		}
 74
 75		class_dev_iter_exit(&iter);
 76		get_device(&epc->dev);
 77		return epc;
 78	}
 79
 80err:
 81	class_dev_iter_exit(&iter);
 82	return ERR_PTR(ret);
 83}
 84EXPORT_SYMBOL_GPL(pci_epc_get);
 85
 86/**
 87 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 88 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 89 *
 90 * Invoke to get the first unreserved BAR that can be used for endpoint
 91 * function. For any incorrect value in reserved_bar return '0'.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 92 */
 93unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
 94					*epc_features)
 95{
 96	int free_bar;
 97
 98	if (!epc_features)
 99		return 0;
100
101	free_bar = ffz(epc_features->reserved_bar);
102	if (free_bar > 5)
103		return 0;
 
 
 
 
 
 
104
105	return free_bar;
106}
107EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
108
109/**
110 * pci_epc_get_features() - get the features supported by EPC
111 * @epc: the features supported by *this* EPC device will be returned
112 * @func_no: the features supported by the EPC device specific to the
113 *	     endpoint function with func_no will be returned
 
 
114 *
115 * Invoke to get the features provided by the EPC which may be
116 * specific to an endpoint function. Returns pci_epc_features on success
117 * and NULL for any failures.
118 */
119const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
120						    u8 func_no)
121{
122	const struct pci_epc_features *epc_features;
123	unsigned long flags;
124
125	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
126		return NULL;
127
 
 
 
128	if (!epc->ops->get_features)
129		return NULL;
130
131	spin_lock_irqsave(&epc->lock, flags);
132	epc_features = epc->ops->get_features(epc, func_no);
133	spin_unlock_irqrestore(&epc->lock, flags);
134
135	return epc_features;
136}
137EXPORT_SYMBOL_GPL(pci_epc_get_features);
138
139/**
140 * pci_epc_stop() - stop the PCI link
141 * @epc: the link of the EPC device that has to be stopped
142 *
143 * Invoke to stop the PCI link
144 */
145void pci_epc_stop(struct pci_epc *epc)
146{
147	unsigned long flags;
148
149	if (IS_ERR(epc) || !epc->ops->stop)
150		return;
151
152	spin_lock_irqsave(&epc->lock, flags);
153	epc->ops->stop(epc);
154	spin_unlock_irqrestore(&epc->lock, flags);
155}
156EXPORT_SYMBOL_GPL(pci_epc_stop);
157
158/**
159 * pci_epc_start() - start the PCI link
160 * @epc: the link of *this* EPC device has to be started
161 *
162 * Invoke to start the PCI link
163 */
164int pci_epc_start(struct pci_epc *epc)
165{
166	int ret;
167	unsigned long flags;
168
169	if (IS_ERR(epc))
170		return -EINVAL;
171
172	if (!epc->ops->start)
173		return 0;
174
175	spin_lock_irqsave(&epc->lock, flags);
176	ret = epc->ops->start(epc);
177	spin_unlock_irqrestore(&epc->lock, flags);
178
179	return ret;
180}
181EXPORT_SYMBOL_GPL(pci_epc_start);
182
183/**
184 * pci_epc_raise_irq() - interrupt the host system
185 * @epc: the EPC device which has to interrupt the host
186 * @func_no: the endpoint function number in the EPC device
187 * @type: specify the type of interrupt; legacy, MSI or MSI-X
188 * @interrupt_num: the MSI or MSI-X interrupt number
 
189 *
190 * Invoke to raise an legacy, MSI or MSI-X interrupt
191 */
192int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
193		      enum pci_epc_irq_type type, u16 interrupt_num)
194{
195	int ret;
196	unsigned long flags;
197
198	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
199		return -EINVAL;
200
 
 
 
201	if (!epc->ops->raise_irq)
202		return 0;
203
204	spin_lock_irqsave(&epc->lock, flags);
205	ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
206	spin_unlock_irqrestore(&epc->lock, flags);
207
208	return ret;
209}
210EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
211
212/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
214 * @epc: the EPC device to which MSI interrupts was requested
215 * @func_no: the endpoint function number in the EPC device
 
216 *
217 * Invoke to get the number of MSI interrupts allocated by the RC
218 */
219int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
220{
221	int interrupt;
222	unsigned long flags;
223
224	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
225		return 0;
226
 
 
 
227	if (!epc->ops->get_msi)
228		return 0;
229
230	spin_lock_irqsave(&epc->lock, flags);
231	interrupt = epc->ops->get_msi(epc, func_no);
232	spin_unlock_irqrestore(&epc->lock, flags);
233
234	if (interrupt < 0)
235		return 0;
236
237	interrupt = 1 << interrupt;
238
239	return interrupt;
240}
241EXPORT_SYMBOL_GPL(pci_epc_get_msi);
242
243/**
244 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
245 * @epc: the EPC device on which MSI has to be configured
246 * @func_no: the endpoint function number in the EPC device
 
247 * @interrupts: number of MSI interrupts required by the EPF
248 *
249 * Invoke to set the required number of MSI interrupts.
250 */
251int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
252{
253	int ret;
254	u8 encode_int;
255	unsigned long flags;
256
257	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
258	    interrupts > 32)
 
 
 
259		return -EINVAL;
260
261	if (!epc->ops->set_msi)
262		return 0;
263
264	encode_int = order_base_2(interrupts);
265
266	spin_lock_irqsave(&epc->lock, flags);
267	ret = epc->ops->set_msi(epc, func_no, encode_int);
268	spin_unlock_irqrestore(&epc->lock, flags);
269
270	return ret;
271}
272EXPORT_SYMBOL_GPL(pci_epc_set_msi);
273
274/**
275 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
276 * @epc: the EPC device to which MSI-X interrupts was requested
277 * @func_no: the endpoint function number in the EPC device
 
278 *
279 * Invoke to get the number of MSI-X interrupts allocated by the RC
280 */
281int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
282{
283	int interrupt;
284	unsigned long flags;
285
286	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
287		return 0;
288
 
 
 
289	if (!epc->ops->get_msix)
290		return 0;
291
292	spin_lock_irqsave(&epc->lock, flags);
293	interrupt = epc->ops->get_msix(epc, func_no);
294	spin_unlock_irqrestore(&epc->lock, flags);
295
296	if (interrupt < 0)
297		return 0;
298
299	return interrupt + 1;
300}
301EXPORT_SYMBOL_GPL(pci_epc_get_msix);
302
303/**
304 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
305 * @epc: the EPC device on which MSI-X has to be configured
306 * @func_no: the endpoint function number in the EPC device
 
307 * @interrupts: number of MSI-X interrupts required by the EPF
 
 
308 *
309 * Invoke to set the required number of MSI-X interrupts.
310 */
311int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
 
312{
313	int ret;
314	unsigned long flags;
315
316	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
317	    interrupts < 1 || interrupts > 2048)
318		return -EINVAL;
319
 
 
 
320	if (!epc->ops->set_msix)
321		return 0;
322
323	spin_lock_irqsave(&epc->lock, flags);
324	ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
325	spin_unlock_irqrestore(&epc->lock, flags);
 
326
327	return ret;
328}
329EXPORT_SYMBOL_GPL(pci_epc_set_msix);
330
331/**
332 * pci_epc_unmap_addr() - unmap CPU address from PCI address
333 * @epc: the EPC device on which address is allocated
334 * @func_no: the endpoint function number in the EPC device
 
335 * @phys_addr: physical address of the local system
336 *
337 * Invoke to unmap the CPU address from PCI address.
338 */
339void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
340			phys_addr_t phys_addr)
341{
342	unsigned long flags;
343
344	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
345		return;
346
 
 
 
347	if (!epc->ops->unmap_addr)
348		return;
349
350	spin_lock_irqsave(&epc->lock, flags);
351	epc->ops->unmap_addr(epc, func_no, phys_addr);
352	spin_unlock_irqrestore(&epc->lock, flags);
353}
354EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
355
356/**
357 * pci_epc_map_addr() - map CPU address to PCI address
358 * @epc: the EPC device on which address is allocated
359 * @func_no: the endpoint function number in the EPC device
 
360 * @phys_addr: physical address of the local system
361 * @pci_addr: PCI address to which the physical address should be mapped
362 * @size: the size of the allocation
363 *
364 * Invoke to map CPU address with PCI address.
365 */
366int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
367		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
368{
369	int ret;
370	unsigned long flags;
371
372	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
373		return -EINVAL;
374
 
 
 
375	if (!epc->ops->map_addr)
376		return 0;
377
378	spin_lock_irqsave(&epc->lock, flags);
379	ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
380	spin_unlock_irqrestore(&epc->lock, flags);
 
381
382	return ret;
383}
384EXPORT_SYMBOL_GPL(pci_epc_map_addr);
385
386/**
387 * pci_epc_clear_bar() - reset the BAR
388 * @epc: the EPC device for which the BAR has to be cleared
389 * @func_no: the endpoint function number in the EPC device
 
390 * @epf_bar: the struct epf_bar that contains the BAR information
391 *
392 * Invoke to reset the BAR of the endpoint device.
393 */
394void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
395		       struct pci_epf_bar *epf_bar)
396{
397	unsigned long flags;
398
399	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
400	    (epf_bar->barno == BAR_5 &&
401	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
402		return;
403
 
 
 
404	if (!epc->ops->clear_bar)
405		return;
406
407	spin_lock_irqsave(&epc->lock, flags);
408	epc->ops->clear_bar(epc, func_no, epf_bar);
409	spin_unlock_irqrestore(&epc->lock, flags);
410}
411EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
412
413/**
414 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
415 * @epc: the EPC device on which BAR has to be configured
416 * @func_no: the endpoint function number in the EPC device
 
417 * @epf_bar: the struct epf_bar that contains the BAR information
418 *
419 * Invoke to configure the BAR of the endpoint device.
420 */
421int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
422		    struct pci_epf_bar *epf_bar)
423{
424	int ret;
425	unsigned long irq_flags;
426	int flags = epf_bar->flags;
427
428	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
429	    (epf_bar->barno == BAR_5 &&
430	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
431	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
432	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
433	    (upper_32_bits(epf_bar->size) &&
434	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
435		return -EINVAL;
436
 
 
 
437	if (!epc->ops->set_bar)
438		return 0;
439
440	spin_lock_irqsave(&epc->lock, irq_flags);
441	ret = epc->ops->set_bar(epc, func_no, epf_bar);
442	spin_unlock_irqrestore(&epc->lock, irq_flags);
443
444	return ret;
445}
446EXPORT_SYMBOL_GPL(pci_epc_set_bar);
447
448/**
449 * pci_epc_write_header() - write standard configuration header
450 * @epc: the EPC device to which the configuration header should be written
451 * @func_no: the endpoint function number in the EPC device
 
452 * @header: standard configuration header fields
453 *
454 * Invoke to write the configuration header to the endpoint controller. Every
455 * endpoint controller will have a dedicated location to which the standard
456 * configuration header would be written. The callback function should write
457 * the header fields to this dedicated location.
458 */
459int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
460			 struct pci_epf_header *header)
461{
462	int ret;
463	unsigned long flags;
464
465	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
466		return -EINVAL;
467
 
 
 
 
 
 
 
468	if (!epc->ops->write_header)
469		return 0;
470
471	spin_lock_irqsave(&epc->lock, flags);
472	ret = epc->ops->write_header(epc, func_no, header);
473	spin_unlock_irqrestore(&epc->lock, flags);
474
475	return ret;
476}
477EXPORT_SYMBOL_GPL(pci_epc_write_header);
478
479/**
480 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
481 * @epc: the EPC device to which the endpoint function should be added
482 * @epf: the endpoint function to be added
 
 
483 *
484 * A PCI endpoint device can have one or more functions. In the case of PCIe,
485 * the specification allows up to 8 PCIe endpoint functions. Invoke
486 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
487 */
488int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
 
489{
490	unsigned long flags;
 
 
 
 
 
491
492	if (epf->epc)
493		return -EBUSY;
494
495	if (IS_ERR(epc))
496		return -EINVAL;
497
498	if (epf->func_no > epc->max_functions - 1)
499		return -EINVAL;
 
 
 
 
 
500
501	epf->epc = epc;
 
 
 
 
502
503	spin_lock_irqsave(&epc->lock, flags);
504	list_add_tail(&epf->list, &epc->pci_epf);
505	spin_unlock_irqrestore(&epc->lock, flags);
 
 
 
 
 
 
 
506
507	return 0;
 
 
 
 
508}
509EXPORT_SYMBOL_GPL(pci_epc_add_epf);
510
511/**
512 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
513 * @epc: the EPC device from which the endpoint function should be removed
514 * @epf: the endpoint function to be removed
 
 
515 *
516 * Invoke to remove PCI endpoint function from the endpoint controller.
517 */
518void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
 
519{
520	unsigned long flags;
 
521
522	if (!epc || IS_ERR(epc) || !epf)
523		return;
524
525	spin_lock_irqsave(&epc->lock, flags);
526	list_del(&epf->list);
 
 
 
 
 
 
 
 
 
527	epf->epc = NULL;
528	spin_unlock_irqrestore(&epc->lock, flags);
529}
530EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
531
532/**
533 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
534 *		      connection with the Root Complex.
535 * @epc: the EPC device which has established link with the host
536 *
537 * Invoke to Notify the EPF device that the EPC device has established a
538 * connection with the Root Complex.
539 */
540void pci_epc_linkup(struct pci_epc *epc)
541{
542	unsigned long flags;
543	struct pci_epf *epf;
544
545	if (!epc || IS_ERR(epc))
546		return;
547
548	spin_lock_irqsave(&epc->lock, flags);
549	list_for_each_entry(epf, &epc->pci_epf, list)
550		pci_epf_linkup(epf);
551	spin_unlock_irqrestore(&epc->lock, flags);
 
 
 
 
552}
553EXPORT_SYMBOL_GPL(pci_epc_linkup);
554
555/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
556 * pci_epc_destroy() - destroy the EPC device
557 * @epc: the EPC device that has to be destroyed
558 *
559 * Invoke to destroy the PCI EPC device
560 */
561void pci_epc_destroy(struct pci_epc *epc)
562{
563	pci_ep_cfs_remove_epc_group(epc->group);
564	device_unregister(&epc->dev);
565	kfree(epc);
566}
567EXPORT_SYMBOL_GPL(pci_epc_destroy);
568
569/**
570 * devm_pci_epc_destroy() - destroy the EPC device
571 * @dev: device that wants to destroy the EPC
572 * @epc: the EPC device that has to be destroyed
573 *
574 * Invoke to destroy the devres associated with this
575 * pci_epc and destroy the EPC device.
576 */
577void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
578{
579	int r;
580
581	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
582			   epc);
583	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
584}
585EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
586
 
 
 
 
 
587/**
588 * __pci_epc_create() - create a new endpoint controller (EPC) device
589 * @dev: device that is creating the new EPC
590 * @ops: function pointers for performing EPC operations
591 * @owner: the owner of the module that creates the EPC device
592 *
593 * Invoke to create a new EPC device and add it to pci_epc class.
594 */
595struct pci_epc *
596__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
597		 struct module *owner)
598{
599	int ret;
600	struct pci_epc *epc;
601
602	if (WARN_ON(!dev)) {
603		ret = -EINVAL;
604		goto err_ret;
605	}
606
607	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
608	if (!epc) {
609		ret = -ENOMEM;
610		goto err_ret;
611	}
612
613	spin_lock_init(&epc->lock);
 
614	INIT_LIST_HEAD(&epc->pci_epf);
615
616	device_initialize(&epc->dev);
617	epc->dev.class = pci_epc_class;
618	epc->dev.parent = dev;
 
619	epc->ops = ops;
620
621	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
622	if (ret)
623		goto put_dev;
624
625	ret = device_add(&epc->dev);
626	if (ret)
627		goto put_dev;
628
629	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
630
631	return epc;
632
633put_dev:
634	put_device(&epc->dev);
635	kfree(epc);
636
637err_ret:
638	return ERR_PTR(ret);
639}
640EXPORT_SYMBOL_GPL(__pci_epc_create);
641
642/**
643 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
644 * @dev: device that is creating the new EPC
645 * @ops: function pointers for performing EPC operations
646 * @owner: the owner of the module that creates the EPC device
647 *
648 * Invoke to create a new EPC device and add it to pci_epc class.
649 * While at that, it also associates the device with the pci_epc using devres.
650 * On driver detach, release function is invoked on the devres data,
651 * then, devres data is freed.
652 */
653struct pci_epc *
654__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
655		      struct module *owner)
656{
657	struct pci_epc **ptr, *epc;
658
659	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
660	if (!ptr)
661		return ERR_PTR(-ENOMEM);
662
663	epc = __pci_epc_create(dev, ops, owner);
664	if (!IS_ERR(epc)) {
665		*ptr = epc;
666		devres_add(dev, ptr);
667	} else {
668		devres_free(ptr);
669	}
670
671	return epc;
672}
673EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
674
675static int __init pci_epc_init(void)
676{
677	pci_epc_class = class_create(THIS_MODULE, "pci_epc");
678	if (IS_ERR(pci_epc_class)) {
679		pr_err("failed to create pci epc class --> %ld\n",
680		       PTR_ERR(pci_epc_class));
681		return PTR_ERR(pci_epc_class);
682	}
683
684	return 0;
685}
686module_init(pci_epc_init);
687
688static void __exit pci_epc_exit(void)
689{
690	class_destroy(pci_epc_class);
691}
692module_exit(pci_epc_exit);
693
694MODULE_DESCRIPTION("PCI EPC Library");
695MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
696MODULE_LICENSE("GPL v2");
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * PCI Endpoint *Controller* (EPC) library
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/slab.h>
 11#include <linux/module.h>
 
 12
 13#include <linux/pci-epc.h>
 14#include <linux/pci-epf.h>
 15#include <linux/pci-ep-cfs.h>
 16
 17static struct class *pci_epc_class;
 18
 19static void devm_pci_epc_release(struct device *dev, void *res)
 20{
 21	struct pci_epc *epc = *(struct pci_epc **)res;
 22
 23	pci_epc_destroy(epc);
 24}
 25
 26static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
 27{
 28	struct pci_epc **epc = res;
 29
 30	return *epc == match_data;
 31}
 32
 33/**
 34 * pci_epc_put() - release the PCI endpoint controller
 35 * @epc: epc returned by pci_epc_get()
 36 *
 37 * release the refcount the caller obtained by invoking pci_epc_get()
 38 */
 39void pci_epc_put(struct pci_epc *epc)
 40{
 41	if (IS_ERR_OR_NULL(epc))
 42		return;
 43
 44	module_put(epc->ops->owner);
 45	put_device(&epc->dev);
 46}
 47EXPORT_SYMBOL_GPL(pci_epc_put);
 48
 49/**
 50 * pci_epc_get() - get the PCI endpoint controller
 51 * @epc_name: device name of the endpoint controller
 52 *
 53 * Invoke to get struct pci_epc * corresponding to the device name of the
 54 * endpoint controller
 55 */
 56struct pci_epc *pci_epc_get(const char *epc_name)
 57{
 58	int ret = -EINVAL;
 59	struct pci_epc *epc;
 60	struct device *dev;
 61	struct class_dev_iter iter;
 62
 63	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
 64	while ((dev = class_dev_iter_next(&iter))) {
 65		if (strcmp(epc_name, dev_name(dev)))
 66			continue;
 67
 68		epc = to_pci_epc(dev);
 69		if (!try_module_get(epc->ops->owner)) {
 70			ret = -EINVAL;
 71			goto err;
 72		}
 73
 74		class_dev_iter_exit(&iter);
 75		get_device(&epc->dev);
 76		return epc;
 77	}
 78
 79err:
 80	class_dev_iter_exit(&iter);
 81	return ERR_PTR(ret);
 82}
 83EXPORT_SYMBOL_GPL(pci_epc_get);
 84
 85/**
 86 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 87 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 88 *
 89 * Invoke to get the first unreserved BAR that can be used by the endpoint
 90 * function.
 91 */
 92enum pci_barno
 93pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
 94{
 95	return pci_epc_get_next_free_bar(epc_features, BAR_0);
 96}
 97EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
 98
 99/**
100 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
101 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
102 * @bar: the starting BAR number from where unreserved BAR should be searched
103 *
104 * Invoke to get the next unreserved BAR starting from @bar that can be used
105 * for endpoint function.
106 */
107enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
108					 *epc_features, enum pci_barno bar)
109{
110	int i;
111
112	if (!epc_features)
113		return BAR_0;
114
115	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
116	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
117		bar++;
118
119	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
120		/* If the BAR is not reserved, return it. */
121		if (epc_features->bar[i].type != BAR_RESERVED)
122			return i;
123	}
124
125	return NO_BAR;
126}
127EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
128
129/**
130 * pci_epc_get_features() - get the features supported by EPC
131 * @epc: the features supported by *this* EPC device will be returned
132 * @func_no: the features supported by the EPC device specific to the
133 *	     endpoint function with func_no will be returned
134 * @vfunc_no: the features supported by the EPC device specific to the
135 *	     virtual endpoint function with vfunc_no will be returned
136 *
137 * Invoke to get the features provided by the EPC which may be
138 * specific to an endpoint function. Returns pci_epc_features on success
139 * and NULL for any failures.
140 */
141const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
142						    u8 func_no, u8 vfunc_no)
143{
144	const struct pci_epc_features *epc_features;
 
145
146	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
147		return NULL;
148
149	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
150		return NULL;
151
152	if (!epc->ops->get_features)
153		return NULL;
154
155	mutex_lock(&epc->lock);
156	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
157	mutex_unlock(&epc->lock);
158
159	return epc_features;
160}
161EXPORT_SYMBOL_GPL(pci_epc_get_features);
162
163/**
164 * pci_epc_stop() - stop the PCI link
165 * @epc: the link of the EPC device that has to be stopped
166 *
167 * Invoke to stop the PCI link
168 */
169void pci_epc_stop(struct pci_epc *epc)
170{
 
 
171	if (IS_ERR(epc) || !epc->ops->stop)
172		return;
173
174	mutex_lock(&epc->lock);
175	epc->ops->stop(epc);
176	mutex_unlock(&epc->lock);
177}
178EXPORT_SYMBOL_GPL(pci_epc_stop);
179
180/**
181 * pci_epc_start() - start the PCI link
182 * @epc: the link of *this* EPC device has to be started
183 *
184 * Invoke to start the PCI link
185 */
186int pci_epc_start(struct pci_epc *epc)
187{
188	int ret;
 
189
190	if (IS_ERR(epc))
191		return -EINVAL;
192
193	if (!epc->ops->start)
194		return 0;
195
196	mutex_lock(&epc->lock);
197	ret = epc->ops->start(epc);
198	mutex_unlock(&epc->lock);
199
200	return ret;
201}
202EXPORT_SYMBOL_GPL(pci_epc_start);
203
204/**
205 * pci_epc_raise_irq() - interrupt the host system
206 * @epc: the EPC device which has to interrupt the host
207 * @func_no: the physical endpoint function number in the EPC device
208 * @vfunc_no: the virtual endpoint function number in the physical function
209 * @type: specify the type of interrupt; INTX, MSI or MSI-X
210 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
211 *
212 * Invoke to raise an INTX, MSI or MSI-X interrupt
213 */
214int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
215		      unsigned int type, u16 interrupt_num)
216{
217	int ret;
 
218
219	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
220		return -EINVAL;
221
222	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
223		return -EINVAL;
224
225	if (!epc->ops->raise_irq)
226		return 0;
227
228	mutex_lock(&epc->lock);
229	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
230	mutex_unlock(&epc->lock);
231
232	return ret;
233}
234EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
235
236/**
237 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
238 *                         MSI data
239 * @epc: the EPC device which has the MSI capability
240 * @func_no: the physical endpoint function number in the EPC device
241 * @vfunc_no: the virtual endpoint function number in the physical function
242 * @phys_addr: the physical address of the outbound region
243 * @interrupt_num: the MSI interrupt number with range (1-N)
244 * @entry_size: Size of Outbound address region for each interrupt
245 * @msi_data: the data that should be written in order to raise MSI interrupt
246 *            with interrupt number as 'interrupt num'
247 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
248 *                   to which the MSI address is mapped
249 *
250 * Invoke to map physical address to MSI address and return MSI data. The
251 * physical address should be an address in the outbound region. This is
252 * required to implement doorbell functionality of NTB wherein EPC on either
253 * side of the interface (primary and secondary) can directly write to the
254 * physical address (in outbound region) of the other interface to ring
255 * doorbell.
256 */
257int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
258			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
259			u32 *msi_data, u32 *msi_addr_offset)
260{
261	int ret;
262
263	if (IS_ERR_OR_NULL(epc))
264		return -EINVAL;
265
266	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
267		return -EINVAL;
268
269	if (!epc->ops->map_msi_irq)
270		return -EINVAL;
271
272	mutex_lock(&epc->lock);
273	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
274				    interrupt_num, entry_size, msi_data,
275				    msi_addr_offset);
276	mutex_unlock(&epc->lock);
277
278	return ret;
279}
280EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
281
282/**
283 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
284 * @epc: the EPC device to which MSI interrupts was requested
285 * @func_no: the physical endpoint function number in the EPC device
286 * @vfunc_no: the virtual endpoint function number in the physical function
287 *
288 * Invoke to get the number of MSI interrupts allocated by the RC
289 */
290int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
291{
292	int interrupt;
 
293
294	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
295		return 0;
296
297	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
298		return 0;
299
300	if (!epc->ops->get_msi)
301		return 0;
302
303	mutex_lock(&epc->lock);
304	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
305	mutex_unlock(&epc->lock);
306
307	if (interrupt < 0)
308		return 0;
309
310	interrupt = 1 << interrupt;
311
312	return interrupt;
313}
314EXPORT_SYMBOL_GPL(pci_epc_get_msi);
315
316/**
317 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
318 * @epc: the EPC device on which MSI has to be configured
319 * @func_no: the physical endpoint function number in the EPC device
320 * @vfunc_no: the virtual endpoint function number in the physical function
321 * @interrupts: number of MSI interrupts required by the EPF
322 *
323 * Invoke to set the required number of MSI interrupts.
324 */
325int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
326{
327	int ret;
328	u8 encode_int;
 
329
330	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
331	    interrupts < 1 || interrupts > 32)
332		return -EINVAL;
333
334	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
335		return -EINVAL;
336
337	if (!epc->ops->set_msi)
338		return 0;
339
340	encode_int = order_base_2(interrupts);
341
342	mutex_lock(&epc->lock);
343	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
344	mutex_unlock(&epc->lock);
345
346	return ret;
347}
348EXPORT_SYMBOL_GPL(pci_epc_set_msi);
349
350/**
351 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
352 * @epc: the EPC device to which MSI-X interrupts was requested
353 * @func_no: the physical endpoint function number in the EPC device
354 * @vfunc_no: the virtual endpoint function number in the physical function
355 *
356 * Invoke to get the number of MSI-X interrupts allocated by the RC
357 */
358int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
359{
360	int interrupt;
 
361
362	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
363		return 0;
364
365	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
366		return 0;
367
368	if (!epc->ops->get_msix)
369		return 0;
370
371	mutex_lock(&epc->lock);
372	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
373	mutex_unlock(&epc->lock);
374
375	if (interrupt < 0)
376		return 0;
377
378	return interrupt + 1;
379}
380EXPORT_SYMBOL_GPL(pci_epc_get_msix);
381
382/**
383 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
384 * @epc: the EPC device on which MSI-X has to be configured
385 * @func_no: the physical endpoint function number in the EPC device
386 * @vfunc_no: the virtual endpoint function number in the physical function
387 * @interrupts: number of MSI-X interrupts required by the EPF
388 * @bir: BAR where the MSI-X table resides
389 * @offset: Offset pointing to the start of MSI-X table
390 *
391 * Invoke to set the required number of MSI-X interrupts.
392 */
393int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
394		     u16 interrupts, enum pci_barno bir, u32 offset)
395{
396	int ret;
 
397
398	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
399	    interrupts < 1 || interrupts > 2048)
400		return -EINVAL;
401
402	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
403		return -EINVAL;
404
405	if (!epc->ops->set_msix)
406		return 0;
407
408	mutex_lock(&epc->lock);
409	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
410				 offset);
411	mutex_unlock(&epc->lock);
412
413	return ret;
414}
415EXPORT_SYMBOL_GPL(pci_epc_set_msix);
416
417/**
418 * pci_epc_unmap_addr() - unmap CPU address from PCI address
419 * @epc: the EPC device on which address is allocated
420 * @func_no: the physical endpoint function number in the EPC device
421 * @vfunc_no: the virtual endpoint function number in the physical function
422 * @phys_addr: physical address of the local system
423 *
424 * Invoke to unmap the CPU address from PCI address.
425 */
426void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
427			phys_addr_t phys_addr)
428{
 
 
429	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
430		return;
431
432	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
433		return;
434
435	if (!epc->ops->unmap_addr)
436		return;
437
438	mutex_lock(&epc->lock);
439	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
440	mutex_unlock(&epc->lock);
441}
442EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
443
444/**
445 * pci_epc_map_addr() - map CPU address to PCI address
446 * @epc: the EPC device on which address is allocated
447 * @func_no: the physical endpoint function number in the EPC device
448 * @vfunc_no: the virtual endpoint function number in the physical function
449 * @phys_addr: physical address of the local system
450 * @pci_addr: PCI address to which the physical address should be mapped
451 * @size: the size of the allocation
452 *
453 * Invoke to map CPU address with PCI address.
454 */
455int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
456		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
457{
458	int ret;
 
459
460	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
461		return -EINVAL;
462
463	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
464		return -EINVAL;
465
466	if (!epc->ops->map_addr)
467		return 0;
468
469	mutex_lock(&epc->lock);
470	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
471				 size);
472	mutex_unlock(&epc->lock);
473
474	return ret;
475}
476EXPORT_SYMBOL_GPL(pci_epc_map_addr);
477
478/**
479 * pci_epc_clear_bar() - reset the BAR
480 * @epc: the EPC device for which the BAR has to be cleared
481 * @func_no: the physical endpoint function number in the EPC device
482 * @vfunc_no: the virtual endpoint function number in the physical function
483 * @epf_bar: the struct epf_bar that contains the BAR information
484 *
485 * Invoke to reset the BAR of the endpoint device.
486 */
487void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
488		       struct pci_epf_bar *epf_bar)
489{
 
 
490	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
491	    (epf_bar->barno == BAR_5 &&
492	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
493		return;
494
495	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
496		return;
497
498	if (!epc->ops->clear_bar)
499		return;
500
501	mutex_lock(&epc->lock);
502	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
503	mutex_unlock(&epc->lock);
504}
505EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
506
/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	/*
	 * Reject invalid BAR configurations:
	 *  - a 64-bit BAR starting at BAR_5 (a 64-bit BAR spans two
	 *    consecutive BAR registers, and there is none after BAR_5),
	 *  - an I/O BAR carrying memory-BAR-only flag bits,
	 *  - a BAR larger than 4 GiB that is not marked as 64-bit.
	 */
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Controllers without a set_bar callback silently ignore the request. */
	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
544
545/**
546 * pci_epc_write_header() - write standard configuration header
547 * @epc: the EPC device to which the configuration header should be written
548 * @func_no: the physical endpoint function number in the EPC device
549 * @vfunc_no: the virtual endpoint function number in the physical function
550 * @header: standard configuration header fields
551 *
552 * Invoke to write the configuration header to the endpoint controller. Every
553 * endpoint controller will have a dedicated location to which the standard
554 * configuration header would be written. The callback function should write
555 * the header fields to this dedicated location.
556 */
557int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
558			 struct pci_epf_header *header)
559{
560	int ret;
 
561
562	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
563		return -EINVAL;
564
565	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
566		return -EINVAL;
567
568	/* Only Virtual Function #1 has deviceID */
569	if (vfunc_no > 1)
570		return -EINVAL;
571
572	if (!epc->ops->write_header)
573		return 0;
574
575	mutex_lock(&epc->lock);
576	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
577	mutex_unlock(&epc->lock);
578
579	return ret;
580}
581EXPORT_SYMBOL_GPL(pci_epc_write_header);
582
583/**
584 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
585 * @epc: the EPC device to which the endpoint function should be added
586 * @epf: the endpoint function to be added
587 * @type: Identifies if the EPC is connected to the primary or secondary
588 *        interface of EPF
589 *
590 * A PCI endpoint device can have one or more functions. In the case of PCIe,
591 * the specification allows up to 8 PCIe endpoint functions. Invoke
592 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
593 */
594int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
595		    enum pci_epc_interface_type type)
596{
597	struct list_head *list;
598	u32 func_no;
599	int ret = 0;
600
601	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
602		return -EINVAL;
603
604	if (type == PRIMARY_INTERFACE && epf->epc)
605		return -EBUSY;
606
607	if (type == SECONDARY_INTERFACE && epf->sec_epc)
608		return -EBUSY;
609
610	mutex_lock(&epc->list_lock);
611	func_no = find_first_zero_bit(&epc->function_num_map,
612				      BITS_PER_LONG);
613	if (func_no >= BITS_PER_LONG) {
614		ret = -EINVAL;
615		goto ret;
616	}
617
618	if (func_no > epc->max_functions - 1) {
619		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
620		ret = -EINVAL;
621		goto ret;
622	}
623
624	set_bit(func_no, &epc->function_num_map);
625	if (type == PRIMARY_INTERFACE) {
626		epf->func_no = func_no;
627		epf->epc = epc;
628		list = &epf->list;
629	} else {
630		epf->sec_epc_func_no = func_no;
631		epf->sec_epc = epc;
632		list = &epf->sec_epc_list;
633	}
634
635	list_add_tail(list, &epc->pci_epf);
636ret:
637	mutex_unlock(&epc->list_lock);
638
639	return ret;
640}
641EXPORT_SYMBOL_GPL(pci_epc_add_epf);
642
643/**
644 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
645 * @epc: the EPC device from which the endpoint function should be removed
646 * @epf: the endpoint function to be removed
647 * @type: identifies if the EPC is connected to the primary or secondary
648 *        interface of EPF
649 *
650 * Invoke to remove PCI endpoint function from the endpoint controller.
651 */
652void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
653			enum pci_epc_interface_type type)
654{
655	struct list_head *list;
656	u32 func_no = 0;
657
658	if (IS_ERR_OR_NULL(epc) || !epf)
659		return;
660
661	if (type == PRIMARY_INTERFACE) {
662		func_no = epf->func_no;
663		list = &epf->list;
664	} else {
665		func_no = epf->sec_epc_func_no;
666		list = &epf->sec_epc_list;
667	}
668
669	mutex_lock(&epc->list_lock);
670	clear_bit(func_no, &epc->function_num_map);
671	list_del(list);
672	epf->epc = NULL;
673	mutex_unlock(&epc->list_lock);
674}
675EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
676
/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to Notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * Deliver the link-up event to every EPF bound to this EPC.
	 * Lock order: epc->list_lock (protects the EPF list), then
	 * epf->lock around each callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
702
/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to Notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * Deliver the link-down event to every EPF bound to this EPC.
	 * Lock order: epc->list_lock, then epf->lock around each callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
728
/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to Notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * Deliver the core-init event to every EPF bound to this EPC.
	 * Lock order: epc->list_lock, then epf->lock around each callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->core_init)
			epf->event_ops->core_init(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
754
/**
 * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
 *			  the BME event from the Root complex
 * @epc: the EPC device that received the BME event
 *
 * Invoke to Notify the EPF device that the EPC device has received the Bus
 * Master Enable (BME) event from the Root complex
 */
void pci_epc_bme_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * Deliver the BME event to every EPF bound to this EPC.
	 * Lock order: epc->list_lock, then epf->lock around each callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bme)
			epf->event_ops->bme(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
780
781/**
782 * pci_epc_destroy() - destroy the EPC device
783 * @epc: the EPC device that has to be destroyed
784 *
785 * Invoke to destroy the PCI EPC device
786 */
787void pci_epc_destroy(struct pci_epc *epc)
788{
789	pci_ep_cfs_remove_epc_group(epc->group);
790	device_unregister(&epc->dev);
 
791}
792EXPORT_SYMBOL_GPL(pci_epc_destroy);
793
794/**
795 * devm_pci_epc_destroy() - destroy the EPC device
796 * @dev: device that wants to destroy the EPC
797 * @epc: the EPC device that has to be destroyed
798 *
799 * Invoke to destroy the devres associated with this
800 * pci_epc and destroy the EPC device.
801 */
802void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
803{
804	int r;
805
806	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
807			   epc);
808	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
809}
810EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
811
/* Device-core release callback: frees the EPC allocated in __pci_epc_create(). */
static void pci_epc_release(struct device *dev)
{
	struct pci_epc *epc = to_pci_epc(dev);

	kfree(epc);
}
816
/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 *
 * NOTE(review): @owner is not referenced in this body — presumably callers
 * rely on @ops->owner instead (see pci_epc_get()/pci_epc_put()); confirm
 * against the pci_epc_create() wrapper.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	/* epc->lock serializes EPC ops; list_lock protects the EPF list. */
	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	/*
	 * After device_initialize(), the device core owns epc: put_device()
	 * drops the reference and frees it through pci_epc_release() — do
	 * not kfree() it here.
	 */
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);
872
873/**
874 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
875 * @dev: device that is creating the new EPC
876 * @ops: function pointers for performing EPC operations
877 * @owner: the owner of the module that creates the EPC device
878 *
879 * Invoke to create a new EPC device and add it to pci_epc class.
880 * While at that, it also associates the device with the pci_epc using devres.
881 * On driver detach, release function is invoked on the devres data,
882 * then, devres data is freed.
883 */
884struct pci_epc *
885__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
886		      struct module *owner)
887{
888	struct pci_epc **ptr, *epc;
889
890	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
891	if (!ptr)
892		return ERR_PTR(-ENOMEM);
893
894	epc = __pci_epc_create(dev, ops, owner);
895	if (!IS_ERR(epc)) {
896		*ptr = epc;
897		devres_add(dev, ptr);
898	} else {
899		devres_free(ptr);
900	}
901
902	return epc;
903}
904EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
905
906static int __init pci_epc_init(void)
907{
908	pci_epc_class = class_create("pci_epc");
909	if (IS_ERR(pci_epc_class)) {
910		pr_err("failed to create pci epc class --> %ld\n",
911		       PTR_ERR(pci_epc_class));
912		return PTR_ERR(pci_epc_class);
913	}
914
915	return 0;
916}
917module_init(pci_epc_init);
918
/* Module exit: tear down the class created in pci_epc_init(). */
static void __exit pci_epc_exit(void)
{
	class_destroy(pci_epc_class);
}
module_exit(pci_epc_exit);
924
925MODULE_DESCRIPTION("PCI EPC Library");
926MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");