  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * PCI Endpoint *Controller* (EPC) library
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/slab.h>
 11#include <linux/module.h>
 12
 13#include <linux/pci-epc.h>
 14#include <linux/pci-epf.h>
 15#include <linux/pci-ep-cfs.h>
 16
 17static struct class *pci_epc_class;
 18
 19static void devm_pci_epc_release(struct device *dev, void *res)
 20{
 21	struct pci_epc *epc = *(struct pci_epc **)res;
 22
 23	pci_epc_destroy(epc);
 24}
 25
 26static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
 27{
 28	struct pci_epc **epc = res;
 29
 30	return *epc == match_data;
 31}
 32
 33/**
 34 * pci_epc_put() - release the PCI endpoint controller
 35 * @epc: epc returned by pci_epc_get()
 36 *
 37 * release the refcount the caller obtained by invoking pci_epc_get()
 38 */
 39void pci_epc_put(struct pci_epc *epc)
 40{
 41	if (IS_ERR_OR_NULL(epc))
 42		return;
 43
 44	module_put(epc->ops->owner);
 45	put_device(&epc->dev);
 46}
 47EXPORT_SYMBOL_GPL(pci_epc_put);
 48
 49/**
 50 * pci_epc_get() - get the PCI endpoint controller
 51 * @epc_name: device name of the endpoint controller
 52 *
 53 * Invoke to get struct pci_epc * corresponding to the device name of the
 54 * endpoint controller
 55 */
 56struct pci_epc *pci_epc_get(const char *epc_name)
 57{
 58	int ret = -EINVAL;
 59	struct pci_epc *epc;
 60	struct device *dev;
 61	struct class_dev_iter iter;
 62
 63	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
 64	while ((dev = class_dev_iter_next(&iter))) {
 65		if (strcmp(epc_name, dev_name(dev)))
 66			continue;
 67
 68		epc = to_pci_epc(dev);
 69		if (!try_module_get(epc->ops->owner)) {
 70			ret = -EINVAL;
 71			goto err;
 72		}
 73
 74		class_dev_iter_exit(&iter);
 75		get_device(&epc->dev);
 76		return epc;
 77	}
 78
 79err:
 80	class_dev_iter_exit(&iter);
 81	return ERR_PTR(ret);
 82}
 83EXPORT_SYMBOL_GPL(pci_epc_get);
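Usage sketch (not part of this file): an endpoint function driver typically resolves its controller once by name and drops the reference at teardown. The controller name below is hypothetical.

#include <linux/err.h>
#include <linux/pci-epc.h>

static struct pci_epc *example_epc;

static int example_attach_epc(void)
{
	/* "4a090000.pcie-ep" is a hypothetical dev_name() of an EPC */
	example_epc = pci_epc_get("4a090000.pcie-ep");
	if (IS_ERR(example_epc))
		return PTR_ERR(example_epc);

	return 0;
}

static void example_detach_epc(void)
{
	/* drops the device and module references taken by pci_epc_get() */
	pci_epc_put(example_epc);
}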
 84
 85/**
 86 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 87 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 88 *
 89 * Invoke to get the first unreserved BAR that can be used by the endpoint
  90 * function. Returns NO_BAR if no BAR is free (BAR_0 if @epc_features is NULL).
 91 */
 92enum pci_barno
 93pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
 94{
 95	return pci_epc_get_next_free_bar(epc_features, BAR_0);
 96}
 97EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
 98
 99/**
100 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
101 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
102 * @bar: the starting BAR number from where unreserved BAR should be searched
103 *
104 * Invoke to get the next unreserved BAR starting from @bar that can be used
 105 * for the endpoint function. Returns NO_BAR if no unreserved BAR is found.
106 */
107enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
108					 *epc_features, enum pci_barno bar)
109{
110	unsigned long free_bar;
111
112	if (!epc_features)
113		return BAR_0;
114
115	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
116	if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
117		bar++;
118
119	/* Find if the reserved BAR is also a 64-bit BAR */
120	free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
121
122	/* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
123	free_bar <<= 1;
124	free_bar |= epc_features->reserved_bar;
125
126	free_bar = find_next_zero_bit(&free_bar, 6, bar);
127	if (free_bar > 5)
128		return NO_BAR;
129
130	return free_bar;
131}
132EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
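Worked example (a sketch, not from this file) of the bitmap handling above: if BAR_0 is both reserved and fixed to 64 bits, it also consumes BAR_1, so the first free BAR is BAR_2.

#include <linux/printk.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static void example_free_bar(void)
{
	/* hypothetical feature set: BAR_0 reserved and fixed to 64-bit */
	const struct pci_epc_features features = {
		.reserved_bar    = 1 << BAR_0,
		.bar_fixed_64bit = 1 << BAR_0,
	};
	enum pci_barno bar = pci_epc_get_first_free_bar(&features);

	pr_info("first free BAR: %d\n", bar);	/* prints 2, i.e. BAR_2 */
}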
133
134/**
135 * pci_epc_get_features() - get the features supported by EPC
136 * @epc: the features supported by *this* EPC device will be returned
137 * @func_no: the features supported by the EPC device specific to the
138 *	     endpoint function with func_no will be returned
139 * @vfunc_no: the features supported by the EPC device specific to the
140 *	     virtual endpoint function with vfunc_no will be returned
141 *
142 * Invoke to get the features provided by the EPC which may be
143 * specific to an endpoint function. Returns pci_epc_features on success
144 * and NULL for any failures.
145 */
146const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
147						    u8 func_no, u8 vfunc_no)
148{
149	const struct pci_epc_features *epc_features;
150
151	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
152		return NULL;
153
154	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
155		return NULL;
156
157	if (!epc->ops->get_features)
158		return NULL;
159
160	mutex_lock(&epc->lock);
161	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
162	mutex_unlock(&epc->lock);
163
164	return epc_features;
165}
166EXPORT_SYMBOL_GPL(pci_epc_get_features);
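Usage sketch (not part of this file): a function driver usually queries the features at bind time and derives its BAR layout from them; @epf is assumed to be an already-bound struct pci_epf.

#include <linux/errno.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_pick_bar(struct pci_epf *epf, enum pci_barno *bar)
{
	const struct pci_epc_features *features;

	features = pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no);
	if (!features)
		return -EOPNOTSUPP;

	*bar = pci_epc_get_first_free_bar(features);
	if (*bar == NO_BAR)
		return -ENODEV;

	return 0;
}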
167
168/**
169 * pci_epc_stop() - stop the PCI link
 170 * @epc: the EPC device whose PCI link has to be stopped
171 *
172 * Invoke to stop the PCI link
173 */
174void pci_epc_stop(struct pci_epc *epc)
 175{
176	if (IS_ERR(epc) || !epc->ops->stop)
177		return;
178
179	mutex_lock(&epc->lock);
180	epc->ops->stop(epc);
181	mutex_unlock(&epc->lock);
182}
183EXPORT_SYMBOL_GPL(pci_epc_stop);
184
185/**
186 * pci_epc_start() - start the PCI link
 187 * @epc: the EPC device whose PCI link has to be started
188 *
189 * Invoke to start the PCI link
190 */
191int pci_epc_start(struct pci_epc *epc)
192{
 193	int ret;
194
195	if (IS_ERR(epc))
196		return -EINVAL;
197
198	if (!epc->ops->start)
199		return 0;
200
201	mutex_lock(&epc->lock);
202	ret = epc->ops->start(epc);
203	mutex_unlock(&epc->lock);
204
205	return ret;
206}
207EXPORT_SYMBOL_GPL(pci_epc_start);
208
209/**
210 * pci_epc_raise_irq() - interrupt the host system
211 * @epc: the EPC device which has to interrupt the host
212 * @func_no: the physical endpoint function number in the EPC device
213 * @vfunc_no: the virtual endpoint function number in the physical function
214 * @type: specify the type of interrupt; INTX, MSI or MSI-X
215 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
216 *
217 * Invoke to raise an INTX, MSI or MSI-X interrupt
218 */
219int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
220		      unsigned int type, u16 interrupt_num)
221{
 222	int ret;
223
224	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
225		return -EINVAL;
226
227	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
228		return -EINVAL;
229
230	if (!epc->ops->raise_irq)
231		return 0;
232
233	mutex_lock(&epc->lock);
234	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
235	mutex_unlock(&epc->lock);
236
237	return ret;
238}
239EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
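Usage sketch (not part of this file): once the host has enabled the vector, raising an interrupt from a function driver is a single call. PCI_IRQ_MSI is assumed to be the interrupt-type constant used by this kernel version.

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_notify_host(struct pci_epf *epf)
{
	/* raise MSI vector 1 (interrupt numbers are 1-based) */
	return pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
				 PCI_IRQ_MSI, 1);
}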
240
241/**
242 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
243 *                         MSI data
244 * @epc: the EPC device which has the MSI capability
245 * @func_no: the physical endpoint function number in the EPC device
246 * @vfunc_no: the virtual endpoint function number in the physical function
247 * @phys_addr: the physical address of the outbound region
248 * @interrupt_num: the MSI interrupt number with range (1-N)
249 * @entry_size: Size of Outbound address region for each interrupt
250 * @msi_data: the data that should be written in order to raise MSI interrupt
251 *            with interrupt number as 'interrupt num'
252 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
253 *                   to which the MSI address is mapped
254 *
255 * Invoke to map physical address to MSI address and return MSI data. The
256 * physical address should be an address in the outbound region. This is
257 * required to implement doorbell functionality of NTB wherein EPC on either
258 * side of the interface (primary and secondary) can directly write to the
259 * physical address (in outbound region) of the other interface to ring
260 * doorbell.
261 */
262int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
263			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
264			u32 *msi_data, u32 *msi_addr_offset)
265{
266	int ret;
267
268	if (IS_ERR_OR_NULL(epc))
269		return -EINVAL;
270
271	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
272		return -EINVAL;
273
274	if (!epc->ops->map_msi_irq)
275		return -EINVAL;
276
277	mutex_lock(&epc->lock);
278	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
279				    interrupt_num, entry_size, msi_data,
280				    msi_addr_offset);
281	mutex_unlock(&epc->lock);
282
283	return ret;
284}
285EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
286
287/**
288 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 289 * @epc: the EPC device to which MSI interrupts were requested
290 * @func_no: the physical endpoint function number in the EPC device
291 * @vfunc_no: the virtual endpoint function number in the physical function
292 *
293 * Invoke to get the number of MSI interrupts allocated by the RC
294 */
295int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
296{
 297	int interrupt;
298
299	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
300		return 0;
301
302	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
303		return 0;
304
305	if (!epc->ops->get_msi)
306		return 0;
307
308	mutex_lock(&epc->lock);
309	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
310	mutex_unlock(&epc->lock);
311
312	if (interrupt < 0)
313		return 0;
314
315	interrupt = 1 << interrupt;
316
317	return interrupt;
318}
319EXPORT_SYMBOL_GPL(pci_epc_get_msi);
320
321/**
322 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
323 * @epc: the EPC device on which MSI has to be configured
324 * @func_no: the physical endpoint function number in the EPC device
325 * @vfunc_no: the virtual endpoint function number in the physical function
326 * @interrupts: number of MSI interrupts required by the EPF
327 *
328 * Invoke to set the required number of MSI interrupts.
329 */
330int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
331{
332	int ret;
 333	u8 encode_int;
334
335	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
336	    interrupts < 1 || interrupts > 32)
337		return -EINVAL;
338
339	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
340		return -EINVAL;
341
342	if (!epc->ops->set_msi)
343		return 0;
344
345	encode_int = order_base_2(interrupts);
346
347	mutex_lock(&epc->lock);
348	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
349	mutex_unlock(&epc->lock);
350
351	return ret;
352}
353EXPORT_SYMBOL_GPL(pci_epc_set_msi);
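Usage sketch (not part of this file): the function driver advertises how many MSI vectors it wants before link-up and later reads back how many the host actually enabled. The request of 8 vectors is an assumption.

#include <linux/errno.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_setup_msi(struct pci_epf *epf)
{
	int nr_irqs;
	int ret;

	/* advertise 8 vectors; the controller encodes it as a power of two */
	ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no, 8);
	if (ret)
		return ret;

	/* later, e.g. after link-up: how many vectors did the host enable? */
	nr_irqs = pci_epc_get_msi(epf->epc, epf->func_no, epf->vfunc_no);
	if (!nr_irqs)
		return -EINVAL;

	return 0;
}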
354
355/**
356 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 357 * @epc: the EPC device to which MSI-X interrupts were requested
358 * @func_no: the physical endpoint function number in the EPC device
359 * @vfunc_no: the virtual endpoint function number in the physical function
360 *
361 * Invoke to get the number of MSI-X interrupts allocated by the RC
362 */
363int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
364{
365	int interrupt;
366
367	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
368		return 0;
369
370	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
371		return 0;
372
373	if (!epc->ops->get_msix)
374		return 0;
375
376	mutex_lock(&epc->lock);
377	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
378	mutex_unlock(&epc->lock);
379
380	if (interrupt < 0)
381		return 0;
382
383	return interrupt + 1;
384}
385EXPORT_SYMBOL_GPL(pci_epc_get_msix);
386
387/**
388 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
389 * @epc: the EPC device on which MSI-X has to be configured
390 * @func_no: the physical endpoint function number in the EPC device
391 * @vfunc_no: the virtual endpoint function number in the physical function
392 * @interrupts: number of MSI-X interrupts required by the EPF
393 * @bir: BAR where the MSI-X table resides
394 * @offset: Offset pointing to the start of MSI-X table
395 *
396 * Invoke to set the required number of MSI-X interrupts.
397 */
398int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
399		     u16 interrupts, enum pci_barno bir, u32 offset)
400{
401	int ret;
402
403	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
404	    interrupts < 1 || interrupts > 2048)
405		return -EINVAL;
406
407	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
408		return -EINVAL;
409
410	if (!epc->ops->set_msix)
411		return 0;
412
413	mutex_lock(&epc->lock);
414	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
415				 offset);
416	mutex_unlock(&epc->lock);
417
418	return ret;
419}
420EXPORT_SYMBOL_GPL(pci_epc_set_msix);
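Usage sketch (not part of this file): MSI-X additionally needs the BAR and offset where the function driver has placed its MSI-X table; the placement below (16 entries at offset 0 of BAR_1) is hypothetical.

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_setup_msix(struct pci_epf *epf)
{
	/* 16 table entries, table assumed to start at offset 0 of BAR_1 */
	return pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
				16, BAR_1, 0);
}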
421
422/**
423 * pci_epc_unmap_addr() - unmap CPU address from PCI address
424 * @epc: the EPC device on which address is allocated
425 * @func_no: the physical endpoint function number in the EPC device
426 * @vfunc_no: the virtual endpoint function number in the physical function
427 * @phys_addr: physical address of the local system
428 *
429 * Invoke to unmap the CPU address from PCI address.
430 */
431void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
432			phys_addr_t phys_addr)
433{
434	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
435		return;
436
437	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
438		return;
439
440	if (!epc->ops->unmap_addr)
441		return;
442
443	mutex_lock(&epc->lock);
444	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
445	mutex_unlock(&epc->lock);
446}
447EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
448
449/**
450 * pci_epc_map_addr() - map CPU address to PCI address
451 * @epc: the EPC device on which address is allocated
452 * @func_no: the physical endpoint function number in the EPC device
453 * @vfunc_no: the virtual endpoint function number in the physical function
454 * @phys_addr: physical address of the local system
455 * @pci_addr: PCI address to which the physical address should be mapped
456 * @size: the size of the allocation
457 *
458 * Invoke to map CPU address with PCI address.
459 */
460int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
461		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
462{
 463	int ret;
464
465	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
466		return -EINVAL;
467
468	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
469		return -EINVAL;
470
471	if (!epc->ops->map_addr)
472		return 0;
473
474	mutex_lock(&epc->lock);
475	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
476				 size);
477	mutex_unlock(&epc->lock);
478
479	return ret;
480}
481EXPORT_SYMBOL_GPL(pci_epc_map_addr);
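Usage sketch (not part of this file): reaching host memory from the endpoint follows an allocate/map/access/unmap pattern. The sketch assumes the EPC address-space helpers pci_epc_mem_alloc_addr()/pci_epc_mem_free_addr() and a 4 KiB window.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_peek_host(struct pci_epf *epf, u64 host_pci_addr)
{
	struct pci_epc *epc = epf->epc;
	phys_addr_t phys;
	void __iomem *virt;
	u32 val;
	int ret;

	/* reserve a window in the controller's outbound address space */
	virt = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!virt)
		return -ENOMEM;

	/* map the window to the host-side PCI address */
	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys,
			       host_pci_addr, SZ_4K);
	if (ret)
		goto free_mem;

	val = readl(virt);	/* reads host memory across the link */
	(void)val;

	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys);
free_mem:
	pci_epc_mem_free_addr(epc, phys, virt, SZ_4K);
	return ret;
}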
482
483/**
484 * pci_epc_clear_bar() - reset the BAR
485 * @epc: the EPC device for which the BAR has to be cleared
486 * @func_no: the physical endpoint function number in the EPC device
487 * @vfunc_no: the virtual endpoint function number in the physical function
488 * @epf_bar: the struct epf_bar that contains the BAR information
489 *
490 * Invoke to reset the BAR of the endpoint device.
491 */
492void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
493		       struct pci_epf_bar *epf_bar)
 494{
495	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
496	    (epf_bar->barno == BAR_5 &&
497	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
498		return;
499
500	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
501		return;
502
503	if (!epc->ops->clear_bar)
504		return;
505
506	mutex_lock(&epc->lock);
507	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
508	mutex_unlock(&epc->lock);
509}
510EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
511
512/**
513 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
514 * @epc: the EPC device on which BAR has to be configured
515 * @func_no: the physical endpoint function number in the EPC device
516 * @vfunc_no: the virtual endpoint function number in the physical function
517 * @epf_bar: the struct epf_bar that contains the BAR information
518 *
519 * Invoke to configure the BAR of the endpoint device.
520 */
521int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
522		    struct pci_epf_bar *epf_bar)
523{
 524	int ret;
525	int flags = epf_bar->flags;
526
527	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
528	    (epf_bar->barno == BAR_5 &&
529	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
530	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
531	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
532	    (upper_32_bits(epf_bar->size) &&
533	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
534		return -EINVAL;
535
536	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
537		return -EINVAL;
538
539	if (!epc->ops->set_bar)
540		return 0;
541
542	mutex_lock(&epc->lock);
543	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
544	mutex_unlock(&epc->lock);
545
546	return ret;
547}
548EXPORT_SYMBOL_GPL(pci_epc_set_bar);
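Usage sketch (not part of this file): exposing local memory to the host means describing it in a struct pci_epf_bar and handing that to the controller. @vaddr/@phys are assumed to describe a buffer the function driver already allocated (for example with pci_epf_alloc_space()).

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_expose_bar(struct pci_epf *epf, void *vaddr,
			      dma_addr_t phys, size_t size)
{
	struct pci_epf_bar bar = {
		.addr      = vaddr,
		.phys_addr = phys,
		.size      = size,
		.barno     = BAR_0,
		.flags     = PCI_BASE_ADDRESS_MEM_TYPE_32,
	};

	return pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, &bar);
}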
549
550/**
551 * pci_epc_write_header() - write standard configuration header
552 * @epc: the EPC device to which the configuration header should be written
553 * @func_no: the physical endpoint function number in the EPC device
554 * @vfunc_no: the virtual endpoint function number in the physical function
555 * @header: standard configuration header fields
556 *
557 * Invoke to write the configuration header to the endpoint controller. Every
558 * endpoint controller will have a dedicated location to which the standard
559 * configuration header would be written. The callback function should write
560 * the header fields to this dedicated location.
561 */
562int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
563			 struct pci_epf_header *header)
564{
 565	int ret;
566
567	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
568		return -EINVAL;
569
570	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
571		return -EINVAL;
572
573	/* Only Virtual Function #1 has deviceID */
574	if (vfunc_no > 1)
575		return -EINVAL;
576
577	if (!epc->ops->write_header)
578		return 0;
579
580	mutex_lock(&epc->lock);
581	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
582	mutex_unlock(&epc->lock);
583
584	return ret;
585}
586EXPORT_SYMBOL_GPL(pci_epc_write_header);
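Usage sketch (not part of this file): the function driver fills a struct pci_epf_header with its configuration-space identity and writes it through the controller, typically from its core-init path. The IDs below are made up.

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_write_header(struct pci_epf *epf)
{
	struct pci_epf_header header = {
		.vendorid	= 0x104c,		/* hypothetical IDs */
		.deviceid	= 0xb500,
		.baseclass_code	= PCI_CLASS_OTHERS,
		.interrupt_pin	= PCI_INTERRUPT_INTA,
	};

	return pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
				    &header);
}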
587
588/**
589 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
590 * @epc: the EPC device to which the endpoint function should be added
591 * @epf: the endpoint function to be added
592 * @type: Identifies if the EPC is connected to the primary or secondary
593 *        interface of EPF
594 *
595 * A PCI endpoint device can have one or more functions. In the case of PCIe,
596 * the specification allows up to 8 PCIe endpoint functions. Invoke
597 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
598 */
599int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
600		    enum pci_epc_interface_type type)
601{
602	struct list_head *list;
603	u32 func_no;
604	int ret = 0;
605
606	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
607		return -EINVAL;
608
609	if (type == PRIMARY_INTERFACE && epf->epc)
610		return -EBUSY;
611
612	if (type == SECONDARY_INTERFACE && epf->sec_epc)
613		return -EBUSY;
614
615	mutex_lock(&epc->list_lock);
616	func_no = find_first_zero_bit(&epc->function_num_map,
617				      BITS_PER_LONG);
618	if (func_no >= BITS_PER_LONG) {
619		ret = -EINVAL;
620		goto ret;
621	}
622
623	if (func_no > epc->max_functions - 1) {
624		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
625		ret = -EINVAL;
626		goto ret;
627	}
628
629	set_bit(func_no, &epc->function_num_map);
630	if (type == PRIMARY_INTERFACE) {
631		epf->func_no = func_no;
632		epf->epc = epc;
633		list = &epf->list;
634	} else {
635		epf->sec_epc_func_no = func_no;
636		epf->sec_epc = epc;
637		list = &epf->sec_epc_list;
638	}
639
640	list_add_tail(list, &epc->pci_epf);
641ret:
642	mutex_unlock(&epc->list_lock);
643
644	return ret;
645}
646EXPORT_SYMBOL_GPL(pci_epc_add_epf);
647
648/**
649 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
650 * @epc: the EPC device from which the endpoint function should be removed
651 * @epf: the endpoint function to be removed
652 * @type: identifies if the EPC is connected to the primary or secondary
653 *        interface of EPF
654 *
655 * Invoke to remove PCI endpoint function from the endpoint controller.
656 */
657void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
658			enum pci_epc_interface_type type)
659{
660	struct list_head *list;
661	u32 func_no = 0;
662
663	if (IS_ERR_OR_NULL(epc) || !epf)
664		return;
665
666	if (type == PRIMARY_INTERFACE) {
667		func_no = epf->func_no;
668		list = &epf->list;
669	} else {
670		func_no = epf->sec_epc_func_no;
671		list = &epf->sec_epc_list;
672	}
673
674	mutex_lock(&epc->list_lock);
675	clear_bit(func_no, &epc->function_num_map);
676	list_del(list);
677	epf->epc = NULL;
678	mutex_unlock(&epc->list_lock);
679}
680EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
681
682/**
683 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
684 *		      connection with the Root Complex.
685 * @epc: the EPC device which has established link with the host
686 *
 687 * Invoke to notify the EPF device that the EPC device has established a
688 * connection with the Root Complex.
689 */
690void pci_epc_linkup(struct pci_epc *epc)
 691{
692	struct pci_epf *epf;
693
694	if (IS_ERR_OR_NULL(epc))
695		return;
696
697	mutex_lock(&epc->list_lock);
698	list_for_each_entry(epf, &epc->pci_epf, list) {
699		mutex_lock(&epf->lock);
700		if (epf->event_ops && epf->event_ops->link_up)
701			epf->event_ops->link_up(epf);
702		mutex_unlock(&epf->lock);
703	}
704	mutex_unlock(&epc->list_lock);
705}
706EXPORT_SYMBOL_GPL(pci_epc_linkup);
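Sketch (not part of this file, and hedged): the notifications above reach a function driver through the event_ops it installed on its struct pci_epf; the int return type of the callbacks follows the in-tree pci-epf-test usage.

#include <linux/pci-epf.h>

static int example_link_up(struct pci_epf *epf)
{
	/* the host can now reach the function; start servicing it */
	return 0;
}

static const struct pci_epf_event_ops example_event_ops = {
	.link_up = example_link_up,
};

static void example_install_event_ops(struct pci_epf *epf)
{
	/* typically done in the EPF driver's probe, before binding */
	epf->event_ops = &example_event_ops;
}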
707
708/**
709 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
710 *			connection with the Root Complex.
711 * @epc: the EPC device which has dropped the link with the host
712 *
 713 * Invoke to notify the EPF device that the EPC device has dropped the
714 * connection with the Root Complex.
715 */
716void pci_epc_linkdown(struct pci_epc *epc)
717{
718	struct pci_epf *epf;
719
720	if (IS_ERR_OR_NULL(epc))
721		return;
722
723	mutex_lock(&epc->list_lock);
724	list_for_each_entry(epf, &epc->pci_epf, list) {
725		mutex_lock(&epf->lock);
726		if (epf->event_ops && epf->event_ops->link_down)
727			epf->event_ops->link_down(epf);
728		mutex_unlock(&epf->lock);
729	}
730	mutex_unlock(&epc->list_lock);
731}
732EXPORT_SYMBOL_GPL(pci_epc_linkdown);
733
734/**
735 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
736 *			   initialization is completed.
737 * @epc: the EPC device whose core initialization is completed
738 *
 739 * Invoke to notify the EPF device that the EPC device's initialization
740 * is completed.
741 */
742void pci_epc_init_notify(struct pci_epc *epc)
743{
744	struct pci_epf *epf;
745
746	if (IS_ERR_OR_NULL(epc))
747		return;
748
749	mutex_lock(&epc->list_lock);
750	list_for_each_entry(epf, &epc->pci_epf, list) {
751		mutex_lock(&epf->lock);
752		if (epf->event_ops && epf->event_ops->core_init)
753			epf->event_ops->core_init(epf);
754		mutex_unlock(&epf->lock);
755	}
756	mutex_unlock(&epc->list_lock);
757}
758EXPORT_SYMBOL_GPL(pci_epc_init_notify);
759
760/**
761 * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
 762 *			  the BME event from the Root Complex
763 * @epc: the EPC device that received the BME event
764 *
 765 * Invoke to notify the EPF device that the EPC device has received the Bus
 766 * Master Enable (BME) event from the Root Complex
767 */
768void pci_epc_bme_notify(struct pci_epc *epc)
769{
770	struct pci_epf *epf;
771
772	if (IS_ERR_OR_NULL(epc))
773		return;
774
775	mutex_lock(&epc->list_lock);
776	list_for_each_entry(epf, &epc->pci_epf, list) {
777		mutex_lock(&epf->lock);
778		if (epf->event_ops && epf->event_ops->bme)
779			epf->event_ops->bme(epf);
780		mutex_unlock(&epf->lock);
781	}
782	mutex_unlock(&epc->list_lock);
783}
784EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
785
786/**
787 * pci_epc_destroy() - destroy the EPC device
788 * @epc: the EPC device that has to be destroyed
789 *
790 * Invoke to destroy the PCI EPC device
791 */
792void pci_epc_destroy(struct pci_epc *epc)
793{
794	pci_ep_cfs_remove_epc_group(epc->group);
 795	device_unregister(&epc->dev);
796}
797EXPORT_SYMBOL_GPL(pci_epc_destroy);
798
799/**
800 * devm_pci_epc_destroy() - destroy the EPC device
801 * @dev: device that wants to destroy the EPC
802 * @epc: the EPC device that has to be destroyed
803 *
804 * Invoke to destroy the devres associated with this
805 * pci_epc and destroy the EPC device.
806 */
807void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
808{
809	int r;
810
811	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
812			   epc);
813	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
814}
815EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
816
817static void pci_epc_release(struct device *dev)
818{
819	kfree(to_pci_epc(dev));
820}
821
822/**
823 * __pci_epc_create() - create a new endpoint controller (EPC) device
824 * @dev: device that is creating the new EPC
825 * @ops: function pointers for performing EPC operations
826 * @owner: the owner of the module that creates the EPC device
827 *
828 * Invoke to create a new EPC device and add it to pci_epc class.
829 */
830struct pci_epc *
831__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
832		 struct module *owner)
833{
834	int ret;
835	struct pci_epc *epc;
836
837	if (WARN_ON(!dev)) {
838		ret = -EINVAL;
839		goto err_ret;
840	}
841
842	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
843	if (!epc) {
844		ret = -ENOMEM;
845		goto err_ret;
846	}
847
848	mutex_init(&epc->lock);
849	mutex_init(&epc->list_lock);
850	INIT_LIST_HEAD(&epc->pci_epf);
851
852	device_initialize(&epc->dev);
853	epc->dev.class = pci_epc_class;
854	epc->dev.parent = dev;
855	epc->dev.release = pci_epc_release;
856	epc->ops = ops;
857
858	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
859	if (ret)
860		goto put_dev;
861
862	ret = device_add(&epc->dev);
863	if (ret)
864		goto put_dev;
865
866	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
867
868	return epc;
869
870put_dev:
 871	put_device(&epc->dev);
872
873err_ret:
874	return ERR_PTR(ret);
875}
876EXPORT_SYMBOL_GPL(__pci_epc_create);
877
878/**
879 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
880 * @dev: device that is creating the new EPC
881 * @ops: function pointers for performing EPC operations
882 * @owner: the owner of the module that creates the EPC device
883 *
884 * Invoke to create a new EPC device and add it to pci_epc class.
885 * While at that, it also associates the device with the pci_epc using devres.
886 * On driver detach, release function is invoked on the devres data,
887 * then, devres data is freed.
888 */
889struct pci_epc *
890__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
891		      struct module *owner)
892{
893	struct pci_epc **ptr, *epc;
894
895	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
896	if (!ptr)
897		return ERR_PTR(-ENOMEM);
898
899	epc = __pci_epc_create(dev, ops, owner);
900	if (!IS_ERR(epc)) {
901		*ptr = epc;
902		devres_add(dev, ptr);
903	} else {
904		devres_free(ptr);
905	}
906
907	return epc;
908}
909EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
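Usage sketch (not part of this file): on the controller side, a platform driver typically creates its EPC from probe with the devres variant so the class device is removed automatically on unbind. The ops table below is a hypothetical stub; a real driver implements the callbacks its hardware needs.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>

static int example_epc_start(struct pci_epc *epc)
{
	/* program the hardware to bring up the link (omitted) */
	return 0;
}

static const struct pci_epc_ops example_epc_ops = {
	.start = example_epc_start,
	.owner = THIS_MODULE,
};

static int example_epc_probe(struct platform_device *pdev)
{
	struct pci_epc *epc;

	epc = devm_pci_epc_create(&pdev->dev, &example_epc_ops);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	epc->max_functions = 1;
	return 0;
}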
910
911static int __init pci_epc_init(void)
912{
913	pci_epc_class = class_create("pci_epc");
914	if (IS_ERR(pci_epc_class)) {
915		pr_err("failed to create pci epc class --> %ld\n",
916		       PTR_ERR(pci_epc_class));
917		return PTR_ERR(pci_epc_class);
918	}
919
920	return 0;
921}
922module_init(pci_epc_init);
923
924static void __exit pci_epc_exit(void)
925{
926	class_destroy(pci_epc_class);
927}
928module_exit(pci_epc_exit);
929
930MODULE_DESCRIPTION("PCI EPC Library");
931MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
 