v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * PCI Endpoint *Controller* (EPC) library
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/slab.h>
 11#include <linux/module.h>
 12#include <linux/of_device.h>
 13
 14#include <linux/pci-epc.h>
 15#include <linux/pci-epf.h>
 16#include <linux/pci-ep-cfs.h>
 17
 18static struct class *pci_epc_class;
 19
 20static void devm_pci_epc_release(struct device *dev, void *res)
 21{
 22	struct pci_epc *epc = *(struct pci_epc **)res;
 23
 24	pci_epc_destroy(epc);
 25}
 26
 27static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
 28{
 29	struct pci_epc **epc = res;
 30
 31	return *epc == match_data;
 32}
 33
 34/**
 35 * pci_epc_put() - release the PCI endpoint controller
 36 * @epc: epc returned by pci_epc_get()
 37 *
 38 * release the refcount the caller obtained by invoking pci_epc_get()
 39 */
 40void pci_epc_put(struct pci_epc *epc)
 41{
 42	if (!epc || IS_ERR(epc))
 43		return;
 44
 45	module_put(epc->ops->owner);
 46	put_device(&epc->dev);
 47}
 48EXPORT_SYMBOL_GPL(pci_epc_put);
 49
 50/**
 51 * pci_epc_get() - get the PCI endpoint controller
 52 * @epc_name: device name of the endpoint controller
 53 *
 54 * Invoke to get struct pci_epc * corresponding to the device name of the
 55 * endpoint controller
 56 */
 57struct pci_epc *pci_epc_get(const char *epc_name)
 58{
 59	int ret = -EINVAL;
 60	struct pci_epc *epc;
 61	struct device *dev;
 62	struct class_dev_iter iter;
 63
 64	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
 65	while ((dev = class_dev_iter_next(&iter))) {
 66		if (strcmp(epc_name, dev_name(dev)))
 67			continue;
 68
 69		epc = to_pci_epc(dev);
 70		if (!try_module_get(epc->ops->owner)) {
 71			ret = -EINVAL;
 72			goto err;
 73		}
 74
 75		class_dev_iter_exit(&iter);
 76		get_device(&epc->dev);
 77		return epc;
 78	}
 79
 80err:
 81	class_dev_iter_exit(&iter);
 82	return ERR_PTR(ret);
 83}
 84EXPORT_SYMBOL_GPL(pci_epc_get);
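
/*
 * Editorial example (not part of the original file): a minimal sketch of how
 * a caller might look up an endpoint controller by name and release it again.
 * "example_use_epc" and its error handling are illustrative assumptions, not
 * code taken from the kernel.
 */
static int example_use_epc(const char *epc_name)
{
	struct pci_epc *epc;

	epc = pci_epc_get(epc_name);	/* takes device + module references */
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	/* ... program the controller through the pci_epc_* API ... */

	pci_epc_put(epc);		/* drops both references again */
	return 0;
}
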
 85
 86/**
 87 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 88 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 89 *
 90 * Invoke to get the first unreserved BAR that can be used by an endpoint
 91 * function. Returns 0 for any incorrect value in reserved_bar.
 92 */
 93unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
 94					*epc_features)
 95{
 96	int free_bar;
 97
 98	if (!epc_features)
 99		return 0;
100
101	free_bar = ffz(epc_features->reserved_bar);
102	if (free_bar > 5)
103		return 0;
104
105	return free_bar;
106}
107EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
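
/*
 * Editorial example (not part of the original file): picking an unreserved
 * BAR from the features a controller advertises, roughly the flow an EPF
 * driver such as pci-epf-test follows. "example_pick_bar" is hypothetical.
 */
static int example_pick_bar(struct pci_epc *epc, u8 func_no)
{
	const struct pci_epc_features *features;

	features = pci_epc_get_features(epc, func_no);
	if (!features)
		return -EOPNOTSUPP;

	/* returns BAR_0..BAR_5; 0 is also returned for a bogus bitmap */
	return pci_epc_get_first_free_bar(features);
}
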
108
109/**
110 * pci_epc_get_features() - get the features supported by EPC
111 * @epc: the features supported by *this* EPC device will be returned
112 * @func_no: the features supported by the EPC device specific to the
113 *	     endpoint function with func_no will be returned
114 *
115 * Invoke to get the features provided by the EPC which may be
116 * specific to an endpoint function. Returns pci_epc_features on success
117 * and NULL for any failures.
118 */
119const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
120						    u8 func_no)
121{
122	const struct pci_epc_features *epc_features;
123	unsigned long flags;
124
125	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
126		return NULL;
127
128	if (!epc->ops->get_features)
129		return NULL;
130
131	spin_lock_irqsave(&epc->lock, flags);
132	epc_features = epc->ops->get_features(epc, func_no);
133	spin_unlock_irqrestore(&epc->lock, flags);
134
135	return epc_features;
136}
137EXPORT_SYMBOL_GPL(pci_epc_get_features);
138
139/**
140 * pci_epc_stop() - stop the PCI link
141 * @epc: the link of the EPC device that has to be stopped
142 *
143 * Invoke to stop the PCI link
144 */
145void pci_epc_stop(struct pci_epc *epc)
146{
147	unsigned long flags;
148
149	if (IS_ERR(epc) || !epc->ops->stop)
150		return;
151
152	spin_lock_irqsave(&epc->lock, flags);
153	epc->ops->stop(epc);
154	spin_unlock_irqrestore(&epc->lock, flags);
155}
156EXPORT_SYMBOL_GPL(pci_epc_stop);
157
158/**
159 * pci_epc_start() - start the PCI link
160 * @epc: the link of *this* EPC device has to be started
161 *
162 * Invoke to start the PCI link
163 */
164int pci_epc_start(struct pci_epc *epc)
165{
166	int ret;
167	unsigned long flags;
168
169	if (IS_ERR(epc))
170		return -EINVAL;
171
172	if (!epc->ops->start)
173		return 0;
174
175	spin_lock_irqsave(&epc->lock, flags);
176	ret = epc->ops->start(epc);
177	spin_unlock_irqrestore(&epc->lock, flags);
178
179	return ret;
180}
181EXPORT_SYMBOL_GPL(pci_epc_start);
182
183/**
184 * pci_epc_raise_irq() - interrupt the host system
185 * @epc: the EPC device which has to interrupt the host
186 * @func_no: the endpoint function number in the EPC device
187 * @type: specify the type of interrupt; legacy, MSI or MSI-X
188 * @interrupt_num: the MSI or MSI-X interrupt number
189 *
190 * Invoke to raise a legacy, MSI or MSI-X interrupt
191 */
192int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
193		      enum pci_epc_irq_type type, u16 interrupt_num)
194{
195	int ret;
196	unsigned long flags;
197
198	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
199		return -EINVAL;
200
201	if (!epc->ops->raise_irq)
202		return 0;
203
204	spin_lock_irqsave(&epc->lock, flags);
205	ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
206	spin_unlock_irqrestore(&epc->lock, flags);
207
208	return ret;
209}
210EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
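
/*
 * Editorial example (not part of the original file): interrupting the host
 * with MSI vector 1 when the Root Complex has enabled MSI, falling back to a
 * legacy interrupt otherwise. A simplified sketch of what pci-epf-test does.
 */
static int example_notify_host(struct pci_epc *epc, u8 func_no)
{
	if (pci_epc_get_msi(epc, func_no) > 0)
		return pci_epc_raise_irq(epc, func_no, PCI_EPC_IRQ_MSI, 1);

	return pci_epc_raise_irq(epc, func_no, PCI_EPC_IRQ_LEGACY, 0);
}
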
211
212/**
213 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
214 * @epc: the EPC device to which MSI interrupts were requested
215 * @func_no: the endpoint function number in the EPC device
216 *
217 * Invoke to get the number of MSI interrupts allocated by the RC
218 */
219int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
220{
221	int interrupt;
222	unsigned long flags;
223
224	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
225		return 0;
226
227	if (!epc->ops->get_msi)
228		return 0;
229
230	spin_lock_irqsave(&epc->lock, flags);
231	interrupt = epc->ops->get_msi(epc, func_no);
232	spin_unlock_irqrestore(&epc->lock, flags);
233
234	if (interrupt < 0)
235		return 0;
236
237	interrupt = 1 << interrupt;
238
239	return interrupt;
240}
241EXPORT_SYMBOL_GPL(pci_epc_get_msi);
242
243/**
244 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
245 * @epc: the EPC device on which MSI has to be configured
246 * @func_no: the endpoint function number in the EPC device
247 * @interrupts: number of MSI interrupts required by the EPF
248 *
249 * Invoke to set the required number of MSI interrupts.
250 */
251int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
252{
253	int ret;
254	u8 encode_int;
255	unsigned long flags;
256
257	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
258	    interrupts > 32)
259		return -EINVAL;
260
261	if (!epc->ops->set_msi)
262		return 0;
263
264	encode_int = order_base_2(interrupts);
265
266	spin_lock_irqsave(&epc->lock, flags);
267	ret = epc->ops->set_msi(epc, func_no, encode_int);
268	spin_unlock_irqrestore(&epc->lock, flags);
269
270	return ret;
271}
272EXPORT_SYMBOL_GPL(pci_epc_set_msi);
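
/*
 * Editorial example (not part of the original file): requesting MSI vectors
 * and reading back how many the host enabled. The count handed to
 * pci_epc_set_msi() is rounded up to a power of two by the encoding above,
 * so requesting 3 advertises 4 vectors.
 */
static int example_setup_msi(struct pci_epc *epc, u8 func_no)
{
	int ret;

	ret = pci_epc_set_msi(epc, func_no, 4);	/* advertise 4 MSI vectors */
	if (ret)
		return ret;

	/* 0 until the host actually enables MSI for this function */
	return pci_epc_get_msi(epc, func_no);
}
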
273
274/**
275 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
276 * @epc: the EPC device to which MSI-X interrupts were requested
277 * @func_no: the endpoint function number in the EPC device
278 *
279 * Invoke to get the number of MSI-X interrupts allocated by the RC
280 */
281int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
282{
283	int interrupt;
284	unsigned long flags;
285
286	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
287		return 0;
288
289	if (!epc->ops->get_msix)
290		return 0;
291
292	spin_lock_irqsave(&epc->lock, flags);
293	interrupt = epc->ops->get_msix(epc, func_no);
294	spin_unlock_irqrestore(&epc->lock, flags);
295
296	if (interrupt < 0)
297		return 0;
298
299	return interrupt + 1;
300}
301EXPORT_SYMBOL_GPL(pci_epc_get_msix);
302
303/**
304 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
305 * @epc: the EPC device on which MSI-X has to be configured
306 * @func_no: the endpoint function number in the EPC device
307 * @interrupts: number of MSI-X interrupts required by the EPF
308 *
309 * Invoke to set the required number of MSI-X interrupts.
310 */
311int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
312{
313	int ret;
314	unsigned long flags;
315
316	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
317	    interrupts < 1 || interrupts > 2048)
318		return -EINVAL;
319
320	if (!epc->ops->set_msix)
321		return 0;
322
323	spin_lock_irqsave(&epc->lock, flags);
324	ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
325	spin_unlock_irqrestore(&epc->lock, flags);
326
327	return ret;
328}
329EXPORT_SYMBOL_GPL(pci_epc_set_msix);
330
331/**
332 * pci_epc_unmap_addr() - unmap CPU address from PCI address
333 * @epc: the EPC device on which address is allocated
334 * @func_no: the endpoint function number in the EPC device
335 * @phys_addr: physical address of the local system
336 *
337 * Invoke to unmap the CPU address from PCI address.
338 */
339void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
340			phys_addr_t phys_addr)
341{
342	unsigned long flags;
343
344	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
345		return;
346
347	if (!epc->ops->unmap_addr)
348		return;
349
350	spin_lock_irqsave(&epc->lock, flags);
351	epc->ops->unmap_addr(epc, func_no, phys_addr);
352	spin_unlock_irqrestore(&epc->lock, flags);
353}
354EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
355
356/**
357 * pci_epc_map_addr() - map CPU address to PCI address
358 * @epc: the EPC device on which address is allocated
359 * @func_no: the endpoint function number in the EPC device
360 * @phys_addr: physical address of the local system
361 * @pci_addr: PCI address to which the physical address should be mapped
362 * @size: the size of the allocation
363 *
364 * Invoke to map CPU address with PCI address.
365 */
366int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
367		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
368{
369	int ret;
370	unsigned long flags;
371
372	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
373		return -EINVAL;
374
375	if (!epc->ops->map_addr)
376		return 0;
377
378	spin_lock_irqsave(&epc->lock, flags);
379	ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
380	spin_unlock_irqrestore(&epc->lock, flags);
381
382	return ret;
383}
384EXPORT_SYMBOL_GPL(pci_epc_map_addr);
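
/*
 * Editorial example (not part of the original file): reading a word of host
 * memory through the controller's outbound window. The window is carved out
 * of the EPC address pool with pci_epc_mem_alloc_addr(); the sketch assumes
 * host_addr is suitably aligned and ignores controller-specific constraints.
 */
static int example_peek_host(struct pci_epc *epc, u8 func_no,
			     u64 host_addr, u32 *val)
{
	void __iomem *addr;
	phys_addr_t phys;
	int ret;

	addr = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!addr)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, func_no, phys, host_addr, SZ_4K);
	if (!ret) {
		*val = readl(addr);	/* access host memory via the window */
		pci_epc_unmap_addr(epc, func_no, phys);
	}

	pci_epc_mem_free_addr(epc, phys, addr, SZ_4K);
	return ret;
}
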
385
386/**
387 * pci_epc_clear_bar() - reset the BAR
388 * @epc: the EPC device for which the BAR has to be cleared
389 * @func_no: the endpoint function number in the EPC device
390 * @epf_bar: the struct epf_bar that contains the BAR information
391 *
392 * Invoke to reset the BAR of the endpoint device.
393 */
394void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
395		       struct pci_epf_bar *epf_bar)
396{
397	unsigned long flags;
398
399	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
400	    (epf_bar->barno == BAR_5 &&
401	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
402		return;
403
404	if (!epc->ops->clear_bar)
405		return;
406
407	spin_lock_irqsave(&epc->lock, flags);
408	epc->ops->clear_bar(epc, func_no, epf_bar);
409	spin_unlock_irqrestore(&epc->lock, flags);
410}
411EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
412
413/**
414 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
415 * @epc: the EPC device on which BAR has to be configured
416 * @func_no: the endpoint function number in the EPC device
417 * @epf_bar: the struct epf_bar that contains the BAR information
418 *
419 * Invoke to configure the BAR of the endpoint device.
420 */
421int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
422		    struct pci_epf_bar *epf_bar)
423{
424	int ret;
425	unsigned long irq_flags;
426	int flags = epf_bar->flags;
427
428	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
429	    (epf_bar->barno == BAR_5 &&
430	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
431	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
432	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
433	    (upper_32_bits(epf_bar->size) &&
434	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
435		return -EINVAL;
436
437	if (!epc->ops->set_bar)
438		return 0;
439
440	spin_lock_irqsave(&epc->lock, irq_flags);
441	ret = epc->ops->set_bar(epc, func_no, epf_bar);
442	spin_unlock_irqrestore(&epc->lock, irq_flags);
443
444	return ret;
445}
446EXPORT_SYMBOL_GPL(pci_epc_set_bar);
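
/*
 * Editorial example (not part of the original file): backing a BAR with
 * memory allocated by the EPF core and exposing it to the host.
 * pci_epf_alloc_space() fills in epf->bar[barno], which is then handed to
 * pci_epc_set_bar(). A sketch against the v5.4 EPF prototypes; the size,
 * alignment and BAR number are illustrative.
 */
static int example_expose_bar(struct pci_epf *epf, enum pci_barno barno)
{
	void *base;
	int ret;

	base = pci_epf_alloc_space(epf, SZ_64K, barno, SZ_64K);
	if (!base)
		return -ENOMEM;

	ret = pci_epc_set_bar(epf->epc, epf->func_no, &epf->bar[barno]);
	if (ret)
		pci_epf_free_space(epf, base, barno);

	return ret;
}
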
447
448/**
449 * pci_epc_write_header() - write standard configuration header
450 * @epc: the EPC device to which the configuration header should be written
451 * @func_no: the endpoint function number in the EPC device
452 * @header: standard configuration header fields
453 *
454 * Invoke to write the configuration header to the endpoint controller. Every
455 * endpoint controller will have a dedicated location to which the standard
456 * configuration header would be written. The callback function should write
457 * the header fields to this dedicated location.
458 */
459int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
460			 struct pci_epf_header *header)
461{
462	int ret;
463	unsigned long flags;
464
465	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
466		return -EINVAL;
467
468	if (!epc->ops->write_header)
469		return 0;
470
471	spin_lock_irqsave(&epc->lock, flags);
472	ret = epc->ops->write_header(epc, func_no, header);
473	spin_unlock_irqrestore(&epc->lock, flags);
474
475	return ret;
476}
477EXPORT_SYMBOL_GPL(pci_epc_write_header);
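
/*
 * Editorial example (not part of the original file): a configuration header
 * an EPF driver would pass to pci_epc_write_header() from its ->bind()
 * callback. The IDs below are placeholders, not allocated values.
 */
static struct pci_epf_header example_header = {
	.vendorid	= 0x104c,	/* placeholder vendor ID */
	.deviceid	= 0xb500,	/* placeholder device ID */
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* typically: pci_epc_write_header(epf->epc, epf->func_no, epf->header); */
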
478
479/**
480 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
481 * @epc: the EPC device to which the endpoint function should be added
482 * @epf: the endpoint function to be added
483 *
484 * A PCI endpoint device can have one or more functions. In the case of PCIe,
485 * the specification allows up to 8 PCIe endpoint functions. Invoke
486 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
487 */
488int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
489{
490	unsigned long flags;
491
492	if (epf->epc)
493		return -EBUSY;
494
495	if (IS_ERR(epc))
496		return -EINVAL;
497
498	if (epf->func_no > epc->max_functions - 1)
499		return -EINVAL;
500
501	epf->epc = epc;
502
503	spin_lock_irqsave(&epc->lock, flags);
504	list_add_tail(&epf->list, &epc->pci_epf);
505	spin_unlock_irqrestore(&epc->lock, flags);
506
507	return 0;
508}
509EXPORT_SYMBOL_GPL(pci_epc_add_epf);
510
511/**
512 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
513 * @epc: the EPC device from which the endpoint function should be removed
514 * @epf: the endpoint function to be removed
515 *
516 * Invoke to remove PCI endpoint function from the endpoint controller.
517 */
518void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
519{
520	unsigned long flags;
521
522	if (!epc || IS_ERR(epc) || !epf)
523		return;
524
525	spin_lock_irqsave(&epc->lock, flags);
526	list_del(&epf->list);
527	epf->epc = NULL;
528	spin_unlock_irqrestore(&epc->lock, flags);
529}
530EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
531
532/**
533 * pci_epc_linkup() - Notify the EPF device that the EPC device has established a
534 *		      connection with the Root Complex.
535 * @epc: the EPC device which has established link with the host
536 *
537 * Invoke to notify the EPF device that the EPC device has established a
538 * connection with the Root Complex.
539 */
540void pci_epc_linkup(struct pci_epc *epc)
541{
542	unsigned long flags;
543	struct pci_epf *epf;
544
545	if (!epc || IS_ERR(epc))
546		return;
547
548	spin_lock_irqsave(&epc->lock, flags);
549	list_for_each_entry(epf, &epc->pci_epf, list)
550		pci_epf_linkup(epf);
551	spin_unlock_irqrestore(&epc->lock, flags);
552}
553EXPORT_SYMBOL_GPL(pci_epc_linkup);
554
555/**
556 * pci_epc_destroy() - destroy the EPC device
557 * @epc: the EPC device that has to be destroyed
558 *
559 * Invoke to destroy the PCI EPC device
560 */
561void pci_epc_destroy(struct pci_epc *epc)
562{
563	pci_ep_cfs_remove_epc_group(epc->group);
564	device_unregister(&epc->dev);
565	kfree(epc);
566}
567EXPORT_SYMBOL_GPL(pci_epc_destroy);
568
569/**
570 * devm_pci_epc_destroy() - destroy the EPC device
571 * @dev: device that wants to destroy the EPC
572 * @epc: the EPC device that has to be destroyed
573 *
574 * Invoke to destroy the devres associated with this
575 * pci_epc and destroy the EPC device.
576 */
577void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
578{
579	int r;
580
581	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
582			   epc);
583	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
584}
585EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
586
587/**
588 * __pci_epc_create() - create a new endpoint controller (EPC) device
589 * @dev: device that is creating the new EPC
590 * @ops: function pointers for performing EPC operations
591 * @owner: the owner of the module that creates the EPC device
592 *
593 * Invoke to create a new EPC device and add it to pci_epc class.
594 */
595struct pci_epc *
596__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
597		 struct module *owner)
598{
599	int ret;
600	struct pci_epc *epc;
601
602	if (WARN_ON(!dev)) {
603		ret = -EINVAL;
604		goto err_ret;
605	}
606
607	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
608	if (!epc) {
609		ret = -ENOMEM;
610		goto err_ret;
611	}
612
613	spin_lock_init(&epc->lock);
614	INIT_LIST_HEAD(&epc->pci_epf);
615
616	device_initialize(&epc->dev);
617	epc->dev.class = pci_epc_class;
618	epc->dev.parent = dev;
619	epc->ops = ops;
620
621	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
622	if (ret)
623		goto put_dev;
624
625	ret = device_add(&epc->dev);
626	if (ret)
627		goto put_dev;
628
629	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
630
631	return epc;
632
633put_dev:
634	put_device(&epc->dev);
635	kfree(epc);
636
637err_ret:
638	return ERR_PTR(ret);
639}
640EXPORT_SYMBOL_GPL(__pci_epc_create);
641
642/**
643 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
644 * @dev: device that is creating the new EPC
645 * @ops: function pointers for performing EPC operations
646 * @owner: the owner of the module that creates the EPC device
647 *
648 * Invoke to create a new EPC device and add it to pci_epc class.
649 * While at that, it also associates the device with the pci_epc using devres.
650 * On driver detach, release function is invoked on the devres data,
651 * then, devres data is freed.
652 */
653struct pci_epc *
654__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
655		      struct module *owner)
656{
657	struct pci_epc **ptr, *epc;
658
659	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
660	if (!ptr)
661		return ERR_PTR(-ENOMEM);
662
663	epc = __pci_epc_create(dev, ops, owner);
664	if (!IS_ERR(epc)) {
665		*ptr = epc;
666		devres_add(dev, ptr);
667	} else {
668		devres_free(ptr);
669	}
670
671	return epc;
672}
673EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
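
/*
 * Editorial example (not part of the original file): the skeleton a
 * controller driver uses to register with this library. "foo" is a
 * hypothetical driver; devm_pci_epc_create() is the convenience wrapper
 * around __devm_pci_epc_create() that passes THIS_MODULE. The ops callbacks
 * are left out for brevity (NULL callbacks are simply treated as
 * unsupported by the helpers above).
 */
static const struct pci_epc_ops foo_epc_ops = {
	/* .write_header, .set_bar, .map_addr, .raise_irq, .start, ... */
	.owner = THIS_MODULE,
};

static int foo_ep_probe(struct platform_device *pdev)
{
	struct pci_epc *epc;

	epc = devm_pci_epc_create(&pdev->dev, &foo_epc_ops);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	epc->max_functions = 1;		/* single physical function */
	return 0;
}
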
674
675static int __init pci_epc_init(void)
676{
677	pci_epc_class = class_create(THIS_MODULE, "pci_epc");
678	if (IS_ERR(pci_epc_class)) {
679		pr_err("failed to create pci epc class --> %ld\n",
680		       PTR_ERR(pci_epc_class));
681		return PTR_ERR(pci_epc_class);
682	}
683
684	return 0;
685}
686module_init(pci_epc_init);
687
688static void __exit pci_epc_exit(void)
689{
690	class_destroy(pci_epc_class);
691}
692module_exit(pci_epc_exit);
693
694MODULE_DESCRIPTION("PCI EPC Library");
695MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
696MODULE_LICENSE("GPL v2");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI Endpoint *Controller* (EPC) library
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/device.h>
  10#include <linux/slab.h>
  11#include <linux/module.h>
  12
  13#include <linux/pci-epc.h>
  14#include <linux/pci-epf.h>
  15#include <linux/pci-ep-cfs.h>
  16
  17static const struct class pci_epc_class = {
  18	.name = "pci_epc",
  19};
  20
  21static void devm_pci_epc_release(struct device *dev, void *res)
  22{
  23	struct pci_epc *epc = *(struct pci_epc **)res;
  24
  25	pci_epc_destroy(epc);
  26}
  27
  28static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
  29{
  30	struct pci_epc **epc = res;
  31
  32	return *epc == match_data;
  33}
  34
  35/**
  36 * pci_epc_put() - release the PCI endpoint controller
  37 * @epc: epc returned by pci_epc_get()
  38 *
  39 * release the refcount the caller obtained by invoking pci_epc_get()
  40 */
  41void pci_epc_put(struct pci_epc *epc)
  42{
  43	if (IS_ERR_OR_NULL(epc))
  44		return;
  45
  46	module_put(epc->ops->owner);
  47	put_device(&epc->dev);
  48}
  49EXPORT_SYMBOL_GPL(pci_epc_put);
  50
  51/**
  52 * pci_epc_get() - get the PCI endpoint controller
  53 * @epc_name: device name of the endpoint controller
  54 *
  55 * Invoke to get struct pci_epc * corresponding to the device name of the
  56 * endpoint controller
  57 */
  58struct pci_epc *pci_epc_get(const char *epc_name)
  59{
  60	int ret = -EINVAL;
  61	struct pci_epc *epc;
  62	struct device *dev;
  63	struct class_dev_iter iter;
  64
  65	class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
  66	while ((dev = class_dev_iter_next(&iter))) {
  67		if (strcmp(epc_name, dev_name(dev)))
  68			continue;
  69
  70		epc = to_pci_epc(dev);
  71		if (!try_module_get(epc->ops->owner)) {
  72			ret = -EINVAL;
  73			goto err;
  74		}
  75
  76		class_dev_iter_exit(&iter);
  77		get_device(&epc->dev);
  78		return epc;
  79	}
  80
  81err:
  82	class_dev_iter_exit(&iter);
  83	return ERR_PTR(ret);
  84}
  85EXPORT_SYMBOL_GPL(pci_epc_get);
  86
  87/**
  88 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
  89 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
  90 *
  91 * Invoke to get the first unreserved BAR that can be used by the endpoint
  92 * function.
  93 */
  94enum pci_barno
  95pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
  96{
  97	return pci_epc_get_next_free_bar(epc_features, BAR_0);
  98}
  99EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
 100
 101/**
 102 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 103 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 104 * @bar: the starting BAR number from where unreserved BAR should be searched
 105 *
 106 * Invoke to get the next unreserved BAR starting from @bar that can be used
 107 * for endpoint function.
 108 */
 109enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
 110					 *epc_features, enum pci_barno bar)
 111{
 112	int i;
 113
 114	if (!epc_features)
 115		return BAR_0;
 116
 117	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
 118	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
 119		bar++;
 120
 121	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
 122		/* If the BAR is not reserved, return it. */
 123		if (epc_features->bar[i].type != BAR_RESERVED)
 124			return i;
 125	}
 126
 127	return NO_BAR;
 128}
 129EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
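
/*
 * Editorial example (not part of the original file): iterating over every
 * usable BAR with pci_epc_get_next_free_bar(), roughly the pattern used by
 * the NTB endpoint function drivers. The loop body is a placeholder.
 */
static void example_walk_free_bars(const struct pci_epc_features *features)
{
	enum pci_barno bar;

	for (bar = pci_epc_get_first_free_bar(features); bar != NO_BAR;
	     bar = pci_epc_get_next_free_bar(features, bar + 1)) {
		/* configure 'bar' here, e.g. via pci_epc_set_bar() */
	}
}
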
 130
 131static bool pci_epc_function_is_valid(struct pci_epc *epc,
 132				      u8 func_no, u8 vfunc_no)
 133{
 134	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
 135		return false;
 136
 137	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
 138		return false;
 139
 140	return true;
 141}
 142
 143/**
 144 * pci_epc_get_features() - get the features supported by EPC
 145 * @epc: the features supported by *this* EPC device will be returned
 146 * @func_no: the features supported by the EPC device specific to the
 147 *	     endpoint function with func_no will be returned
 148 * @vfunc_no: the features supported by the EPC device specific to the
 149 *	     virtual endpoint function with vfunc_no will be returned
 150 *
 151 * Invoke to get the features provided by the EPC which may be
 152 * specific to an endpoint function. Returns pci_epc_features on success
 153 * and NULL for any failures.
 154 */
 155const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
 156						    u8 func_no, u8 vfunc_no)
 157{
 158	const struct pci_epc_features *epc_features;
 159
 160	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 161		return NULL;
 162
 163	if (!epc->ops->get_features)
 164		return NULL;
 165
 166	mutex_lock(&epc->lock);
 167	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
 168	mutex_unlock(&epc->lock);
 169
 170	return epc_features;
 171}
 172EXPORT_SYMBOL_GPL(pci_epc_get_features);
 173
 174/**
 175 * pci_epc_stop() - stop the PCI link
 176 * @epc: the link of the EPC device that has to be stopped
 177 *
 178 * Invoke to stop the PCI link
 179 */
 180void pci_epc_stop(struct pci_epc *epc)
 181{
 182	if (IS_ERR(epc) || !epc->ops->stop)
 183		return;
 184
 185	mutex_lock(&epc->lock);
 186	epc->ops->stop(epc);
 187	mutex_unlock(&epc->lock);
 188}
 189EXPORT_SYMBOL_GPL(pci_epc_stop);
 190
 191/**
 192 * pci_epc_start() - start the PCI link
 193 * @epc: the link of *this* EPC device has to be started
 194 *
 195 * Invoke to start the PCI link
 196 */
 197int pci_epc_start(struct pci_epc *epc)
 198{
 199	int ret;
 200
 201	if (IS_ERR(epc))
 202		return -EINVAL;
 203
 204	if (!epc->ops->start)
 205		return 0;
 206
 207	mutex_lock(&epc->lock);
 208	ret = epc->ops->start(epc);
 209	mutex_unlock(&epc->lock);
 210
 211	return ret;
 212}
 213EXPORT_SYMBOL_GPL(pci_epc_start);
 214
 215/**
 216 * pci_epc_raise_irq() - interrupt the host system
 217 * @epc: the EPC device which has to interrupt the host
 218 * @func_no: the physical endpoint function number in the EPC device
 219 * @vfunc_no: the virtual endpoint function number in the physical function
 220 * @type: specify the type of interrupt; INTX, MSI or MSI-X
 221 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 222 *
 223 * Invoke to raise an INTX, MSI or MSI-X interrupt
 224 */
 225int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 226		      unsigned int type, u16 interrupt_num)
 227{
 228	int ret;
 229
 230	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 231		return -EINVAL;
 232
 233	if (!epc->ops->raise_irq)
 234		return 0;
 235
 236	mutex_lock(&epc->lock);
 237	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
 238	mutex_unlock(&epc->lock);
 239
 240	return ret;
 241}
 242EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
 243
 244/**
 245 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 246 *                         MSI data
 247 * @epc: the EPC device which has the MSI capability
 248 * @func_no: the physical endpoint function number in the EPC device
 249 * @vfunc_no: the virtual endpoint function number in the physical function
 250 * @phys_addr: the physical address of the outbound region
 251 * @interrupt_num: the MSI interrupt number with range (1-N)
 252 * @entry_size: Size of Outbound address region for each interrupt
 253 * @msi_data: the data that should be written in order to raise MSI interrupt
 254 *            with interrupt number as 'interrupt num'
 255 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 256 *                   to which the MSI address is mapped
 257 *
 258 * Invoke to map physical address to MSI address and return MSI data. The
 259 * physical address should be an address in the outbound region. This is
 260 * required to implement doorbell functionality of NTB wherein EPC on either
 261 * side of the interface (primary and secondary) can directly write to the
 262 * physical address (in outbound region) of the other interface to ring
 263 * doorbell.
 264 */
 265int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 266			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
 267			u32 *msi_data, u32 *msi_addr_offset)
 268{
 269	int ret;
 270
 271	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 272		return -EINVAL;
 273
 274	if (!epc->ops->map_msi_irq)
 275		return -EINVAL;
 276
 277	mutex_lock(&epc->lock);
 278	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
 279				    interrupt_num, entry_size, msi_data,
 280				    msi_addr_offset);
 281	mutex_unlock(&epc->lock);
 282
 283	return ret;
 284}
 285EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
 286
 287/**
 288 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 289 * @epc: the EPC device to which MSI interrupts were requested
 290 * @func_no: the physical endpoint function number in the EPC device
 291 * @vfunc_no: the virtual endpoint function number in the physical function
 292 *
 293 * Invoke to get the number of MSI interrupts allocated by the RC
 294 */
 295int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 296{
 297	int interrupt;
 298
 299	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 300		return 0;
 301
 302	if (!epc->ops->get_msi)
 303		return 0;
 304
 305	mutex_lock(&epc->lock);
 306	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
 307	mutex_unlock(&epc->lock);
 308
 309	if (interrupt < 0)
 310		return 0;
 311
 312	interrupt = 1 << interrupt;
 313
 314	return interrupt;
 315}
 316EXPORT_SYMBOL_GPL(pci_epc_get_msi);
 317
 318/**
 319 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 320 * @epc: the EPC device on which MSI has to be configured
 321 * @func_no: the physical endpoint function number in the EPC device
 322 * @vfunc_no: the virtual endpoint function number in the physical function
 323 * @interrupts: number of MSI interrupts required by the EPF
 324 *
 325 * Invoke to set the required number of MSI interrupts.
 326 */
 327int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
 328{
 329	int ret;
 330	u8 encode_int;
 331
 332	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 333		return -EINVAL;
 334
 335	if (interrupts < 1 || interrupts > 32)
 336		return -EINVAL;
 337
 338	if (!epc->ops->set_msi)
 339		return 0;
 340
 341	encode_int = order_base_2(interrupts);
 342
 343	mutex_lock(&epc->lock);
 344	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
 345	mutex_unlock(&epc->lock);
 346
 347	return ret;
 348}
 349EXPORT_SYMBOL_GPL(pci_epc_set_msi);
 350
 351/**
 352 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 354 * @epc: the EPC device to which MSI-X interrupts were requested
 354 * @func_no: the physical endpoint function number in the EPC device
 355 * @vfunc_no: the virtual endpoint function number in the physical function
 356 *
 357 * Invoke to get the number of MSI-X interrupts allocated by the RC
 358 */
 359int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 360{
 361	int interrupt;
 362
 363	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 364		return 0;
 365
 366	if (!epc->ops->get_msix)
 367		return 0;
 368
 369	mutex_lock(&epc->lock);
 370	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
 371	mutex_unlock(&epc->lock);
 372
 373	if (interrupt < 0)
 374		return 0;
 375
 376	return interrupt + 1;
 377}
 378EXPORT_SYMBOL_GPL(pci_epc_get_msix);
 379
 380/**
 381 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 382 * @epc: the EPC device on which MSI-X has to be configured
 383 * @func_no: the physical endpoint function number in the EPC device
 384 * @vfunc_no: the virtual endpoint function number in the physical function
 385 * @interrupts: number of MSI-X interrupts required by the EPF
 386 * @bir: BAR where the MSI-X table resides
 387 * @offset: Offset pointing to the start of MSI-X table
 388 *
 389 * Invoke to set the required number of MSI-X interrupts.
 390 */
 391int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 392		     u16 interrupts, enum pci_barno bir, u32 offset)
 393{
 394	int ret;
 395
 396	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 397		return -EINVAL;
 398
 399	if (interrupts < 1 || interrupts > 2048)
 400		return -EINVAL;
 401
 402	if (!epc->ops->set_msix)
 403		return 0;
 404
 405	mutex_lock(&epc->lock);
 406	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
 407				 offset);
 408	mutex_unlock(&epc->lock);
 409
 410	return ret;
 411}
 412EXPORT_SYMBOL_GPL(pci_epc_set_msix);
 413
 414/**
 415 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 416 * @epc: the EPC device on which address is allocated
 417 * @func_no: the physical endpoint function number in the EPC device
 418 * @vfunc_no: the virtual endpoint function number in the physical function
 419 * @phys_addr: physical address of the local system
 420 *
 421 * Invoke to unmap the CPU address from PCI address.
 422 */
 423void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 424			phys_addr_t phys_addr)
 425{
 426	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 427		return;
 428
 429	if (!epc->ops->unmap_addr)
 430		return;
 431
 432	mutex_lock(&epc->lock);
 433	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
 434	mutex_unlock(&epc->lock);
 435}
 436EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
 437
 438/**
 439 * pci_epc_map_addr() - map CPU address to PCI address
 440 * @epc: the EPC device on which address is allocated
 441 * @func_no: the physical endpoint function number in the EPC device
 442 * @vfunc_no: the virtual endpoint function number in the physical function
 443 * @phys_addr: physical address of the local system
 444 * @pci_addr: PCI address to which the physical address should be mapped
 445 * @size: the size of the allocation
 446 *
 447 * Invoke to map CPU address with PCI address.
 448 */
 449int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 450		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
 451{
 452	int ret;
 453
 454	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 455		return -EINVAL;
 456
 457	if (!epc->ops->map_addr)
 458		return 0;
 459
 460	mutex_lock(&epc->lock);
 461	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
 462				 size);
 463	mutex_unlock(&epc->lock);
 464
 465	return ret;
 466}
 467EXPORT_SYMBOL_GPL(pci_epc_map_addr);
 468
 469/**
 470 * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
 471 * @epc: the EPC device on which the CPU address is to be allocated and mapped
 472 * @func_no: the physical endpoint function number in the EPC device
 473 * @vfunc_no: the virtual endpoint function number in the physical function
 474 * @pci_addr: PCI address to which the CPU address should be mapped
 475 * @pci_size: the number of bytes to map starting from @pci_addr
 476 * @map: where to return the mapping information
 477 *
 478 * Allocate a controller memory address region and map it to a RC PCI address
 479 * region, taking into account the controller physical address mapping
 480 * constraints using the controller operation align_addr(). If this operation is
 481 * not defined, we assume that there are no alignment constraints for the
 482 * mapping.
 483 *
 484 * The effective size of the PCI address range mapped from @pci_addr is
 485 * indicated by @map->pci_size. This size may be less than the requested
 486 * @pci_size. The local virtual CPU address for the mapping is indicated by
 487 * @map->virt_addr (@map->phys_addr indicates the physical address).
 488 * The size and CPU address of the controller memory allocated and mapped are
 489 * respectively indicated by @map->map_size and @map->virt_base (and
 490 * @map->phys_base for the physical address of @map->virt_base).
 491 *
 492 * Returns 0 on success and a negative error code in case of error.
 493 */
 494int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 495		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
 496{
 497	size_t map_size = pci_size;
 498	size_t map_offset = 0;
 499	int ret;
 500
 501	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 502		return -EINVAL;
 503
 504	if (!pci_size || !map)
 505		return -EINVAL;
 506
 507	/*
 508	 * Align the PCI address to map. If the controller defines the
 509	 * .align_addr() operation, use it to determine the PCI address to map
 510	 * and the size of the mapping. Otherwise, assume that the controller
 511	 * has no alignment constraint.
 512	 */
 513	memset(map, 0, sizeof(*map));
 514	map->pci_addr = pci_addr;
 515	if (epc->ops->align_addr)
 516		map->map_pci_addr =
 517			epc->ops->align_addr(epc, pci_addr,
 518					     &map_size, &map_offset);
 519	else
 520		map->map_pci_addr = pci_addr;
 521	map->map_size = map_size;
 522	if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
 523		map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
 524	else
 525		map->pci_size = pci_size;
 526
 527	map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
 528						map->map_size);
 529	if (!map->virt_base)
 530		return -ENOMEM;
 531
 532	map->phys_addr = map->phys_base + map_offset;
 533	map->virt_addr = map->virt_base + map_offset;
 534
 535	ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
 536			       map->map_pci_addr, map->map_size);
 537	if (ret) {
 538		pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
 539				      map->map_size);
 540		return ret;
 541	}
 542
 543	return 0;
 544}
 545EXPORT_SYMBOL_GPL(pci_epc_mem_map);
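
/*
 * Editorial example (not part of the original file): copying from a host
 * buffer using pci_epc_mem_map(), which hides the controller's alignment
 * constraints. A real user would loop when map.pci_size turns out smaller
 * than the requested length; this sketch handles only the first chunk.
 */
static int example_copy_from_host(struct pci_epc *epc, u8 func_no,
				  u8 vfunc_no, u64 host_addr,
				  void *dst, size_t len)
{
	struct pci_epc_map map;
	int ret;

	ret = pci_epc_mem_map(epc, func_no, vfunc_no, host_addr, len, &map);
	if (ret)
		return ret;

	memcpy_fromio(dst, map.virt_addr, map.pci_size);

	pci_epc_mem_unmap(epc, func_no, vfunc_no, &map);
	return 0;
}
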
 546
 547/**
 548 * pci_epc_mem_unmap() - unmap and free a CPU address region
 549 * @epc: the EPC device on which the CPU address is allocated and mapped
 550 * @func_no: the physical endpoint function number in the EPC device
 551 * @vfunc_no: the virtual endpoint function number in the physical function
 552 * @map: the mapping information
 553 *
 554 * Unmap and free a CPU address region that was allocated and mapped with
 555 * pci_epc_mem_map().
 556 */
 557void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 558		       struct pci_epc_map *map)
 559{
 560	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 561		return;
 562
 563	if (!map || !map->virt_base)
 564		return;
 565
 566	pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
 567	pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
 568			      map->map_size);
 569}
 570EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
 571
 572/**
 573 * pci_epc_clear_bar() - reset the BAR
 574 * @epc: the EPC device for which the BAR has to be cleared
 575 * @func_no: the physical endpoint function number in the EPC device
 576 * @vfunc_no: the virtual endpoint function number in the physical function
 577 * @epf_bar: the struct epf_bar that contains the BAR information
 578 *
 579 * Invoke to reset the BAR of the endpoint device.
 580 */
 581void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 582		       struct pci_epf_bar *epf_bar)
 583{
 584	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 585		return;
 586
 587	if (epf_bar->barno == BAR_5 &&
 588	    epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
 589		return;
 590
 591	if (!epc->ops->clear_bar)
 592		return;
 593
 594	mutex_lock(&epc->lock);
 595	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
 596	mutex_unlock(&epc->lock);
 597}
 598EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
 599
 600/**
 601 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 602 * @epc: the EPC device on which BAR has to be configured
 603 * @func_no: the physical endpoint function number in the EPC device
 604 * @vfunc_no: the virtual endpoint function number in the physical function
 605 * @epf_bar: the struct epf_bar that contains the BAR information
 606 *
 607 * Invoke to configure the BAR of the endpoint device.
 608 */
 609int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 610		    struct pci_epf_bar *epf_bar)
 611{
 612	const struct pci_epc_features *epc_features;
 613	enum pci_barno bar = epf_bar->barno;
 614	int flags = epf_bar->flags;
 615	int ret;
 616
 617	epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
 618	if (!epc_features)
 619		return -EINVAL;
 620
 621	if (epc_features->bar[bar].type == BAR_FIXED &&
 622	    (epc_features->bar[bar].fixed_size != epf_bar->size))
 623		return -EINVAL;
 624
 625	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
 626	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
 627	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
 628	    (upper_32_bits(epf_bar->size) &&
 629	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
 630		return -EINVAL;
 631
 632	if (!epc->ops->set_bar)
 633		return 0;
 634
 635	mutex_lock(&epc->lock);
 636	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
 637	mutex_unlock(&epc->lock);
 638
 639	return ret;
 640}
 641EXPORT_SYMBOL_GPL(pci_epc_set_bar);
 642
 643/**
 644 * pci_epc_write_header() - write standard configuration header
 645 * @epc: the EPC device to which the configuration header should be written
 646 * @func_no: the physical endpoint function number in the EPC device
 647 * @vfunc_no: the virtual endpoint function number in the physical function
 648 * @header: standard configuration header fields
 649 *
 650 * Invoke to write the configuration header to the endpoint controller. Every
 651 * endpoint controller will have a dedicated location to which the standard
 652 * configuration header would be written. The callback function should write
 653 * the header fields to this dedicated location.
 654 */
 655int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 656			 struct pci_epf_header *header)
 657{
 658	int ret;
 659
 660	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
 661		return -EINVAL;
 662
 663	/* Only Virtual Function #1 has deviceID */
 664	if (vfunc_no > 1)
 665		return -EINVAL;
 666
 667	if (!epc->ops->write_header)
 668		return 0;
 669
 670	mutex_lock(&epc->lock);
 671	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
 672	mutex_unlock(&epc->lock);
 673
 674	return ret;
 675}
 676EXPORT_SYMBOL_GPL(pci_epc_write_header);
 677
 678/**
 679 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 680 * @epc: the EPC device to which the endpoint function should be added
 681 * @epf: the endpoint function to be added
 682 * @type: Identifies if the EPC is connected to the primary or secondary
 683 *        interface of EPF
 684 *
 685 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 686 * the specification allows up to 8 PCIe endpoint functions. Invoke
 687 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 688 */
 689int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
 690		    enum pci_epc_interface_type type)
 691{
 692	struct list_head *list;
 693	u32 func_no;
 694	int ret = 0;
 695
 696	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
 697		return -EINVAL;
 698
 699	if (type == PRIMARY_INTERFACE && epf->epc)
 700		return -EBUSY;
 701
 702	if (type == SECONDARY_INTERFACE && epf->sec_epc)
 703		return -EBUSY;
 704
 705	mutex_lock(&epc->list_lock);
 706	func_no = find_first_zero_bit(&epc->function_num_map,
 707				      BITS_PER_LONG);
 708	if (func_no >= BITS_PER_LONG) {
 709		ret = -EINVAL;
 710		goto ret;
 711	}
 712
 713	if (func_no > epc->max_functions - 1) {
 714		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
 715		ret = -EINVAL;
 716		goto ret;
 717	}
 718
 719	set_bit(func_no, &epc->function_num_map);
 720	if (type == PRIMARY_INTERFACE) {
 721		epf->func_no = func_no;
 722		epf->epc = epc;
 723		list = &epf->list;
 724	} else {
 725		epf->sec_epc_func_no = func_no;
 726		epf->sec_epc = epc;
 727		list = &epf->sec_epc_list;
 728	}
 729
 730	list_add_tail(list, &epc->pci_epf);
 731ret:
 732	mutex_unlock(&epc->list_lock);
 733
 734	return ret;
 735}
 736EXPORT_SYMBOL_GPL(pci_epc_add_epf);
 737
 738/**
 739 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 740 * @epc: the EPC device from which the endpoint function should be removed
 741 * @epf: the endpoint function to be removed
 742 * @type: identifies if the EPC is connected to the primary or secondary
 743 *        interface of EPF
 744 *
 745 * Invoke to remove PCI endpoint function from the endpoint controller.
 746 */
 747void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
 748			enum pci_epc_interface_type type)
 749{
 750	struct list_head *list;
 751	u32 func_no = 0;
 752
 753	if (IS_ERR_OR_NULL(epc) || !epf)
 754		return;
 755
 756	mutex_lock(&epc->list_lock);
 757	if (type == PRIMARY_INTERFACE) {
 758		func_no = epf->func_no;
 759		list = &epf->list;
 760		epf->epc = NULL;
 761	} else {
 762		func_no = epf->sec_epc_func_no;
 763		list = &epf->sec_epc_list;
 764		epf->sec_epc = NULL;
 765	}
 766	clear_bit(func_no, &epc->function_num_map);
 767	list_del(list);
 768	mutex_unlock(&epc->list_lock);
 769}
 770EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
 771
 772/**
 773 * pci_epc_linkup() - Notify the EPF device that the EPC device has established a
 774 *		      connection with the Root Complex.
 775 * @epc: the EPC device which has established link with the host
 776 *
 777 * Invoke to notify the EPF device that the EPC device has established a
 778 * connection with the Root Complex.
 779 */
 780void pci_epc_linkup(struct pci_epc *epc)
 781{
 782	struct pci_epf *epf;
 783
 784	if (IS_ERR_OR_NULL(epc))
 785		return;
 786
 787	mutex_lock(&epc->list_lock);
 788	list_for_each_entry(epf, &epc->pci_epf, list) {
 789		mutex_lock(&epf->lock);
 790		if (epf->event_ops && epf->event_ops->link_up)
 791			epf->event_ops->link_up(epf);
 792		mutex_unlock(&epf->lock);
 793	}
 794	mutex_unlock(&epc->list_lock);
 795}
 796EXPORT_SYMBOL_GPL(pci_epc_linkup);
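
/*
 * Editorial example (not part of the original file): how an EPF driver
 * consumes these notifications by filling in pci_epf_event_ops. "foo" is a
 * hypothetical driver; the int return type of the callback is assumed to
 * match the prototypes in this kernel series.
 */
static int foo_epf_link_up(struct pci_epf *epf)
{
	/* link is up: start DMA, kick a workqueue, etc. */
	return 0;
}

static const struct pci_epf_event_ops foo_epf_event_ops = {
	.link_up = foo_epf_link_up,
};

/* in the EPF driver's probe: epf->event_ops = &foo_epf_event_ops; */
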
 797
 798/**
 799 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 800 *			connection with the Root Complex.
 801 * @epc: the EPC device which has dropped the link with the host
 802 *
 803 * Invoke to notify the EPF device that the EPC device has dropped the
 804 * connection with the Root Complex.
 805 */
 806void pci_epc_linkdown(struct pci_epc *epc)
 807{
 808	struct pci_epf *epf;
 809
 810	if (IS_ERR_OR_NULL(epc))
 811		return;
 812
 813	mutex_lock(&epc->list_lock);
 814	list_for_each_entry(epf, &epc->pci_epf, list) {
 815		mutex_lock(&epf->lock);
 816		if (epf->event_ops && epf->event_ops->link_down)
 817			epf->event_ops->link_down(epf);
 818		mutex_unlock(&epf->lock);
 819	}
 820	mutex_unlock(&epc->list_lock);
 821}
 822EXPORT_SYMBOL_GPL(pci_epc_linkdown);
 823
 824/**
 825 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 826 *                         is completed.
 827 * @epc: the EPC device whose initialization is completed
 828 *
 829 * Invoke to notify the EPF device that the EPC device's initialization
 830 * is completed.
 831 */
 832void pci_epc_init_notify(struct pci_epc *epc)
 833{
 834	struct pci_epf *epf;
 835
 836	if (IS_ERR_OR_NULL(epc))
 837		return;
 838
 839	mutex_lock(&epc->list_lock);
 840	list_for_each_entry(epf, &epc->pci_epf, list) {
 841		mutex_lock(&epf->lock);
 842		if (epf->event_ops && epf->event_ops->epc_init)
 843			epf->event_ops->epc_init(epf);
 844		mutex_unlock(&epf->lock);
 845	}
 846	epc->init_complete = true;
 847	mutex_unlock(&epc->list_lock);
 848}
 849EXPORT_SYMBOL_GPL(pci_epc_init_notify);
 850
 851/**
 852 * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
 853 *                                 complete to the EPF device
 854 * @epc: the EPC device whose initialization is pending to be notified
 855 * @epf: the EPF device to be notified
 856 *
 857 * Invoke to notify the pending EPC device initialization complete to the EPF
 858 * device. This is used to deliver the notification if the EPC initialization
 859 * got completed before the EPF driver bind.
 860 */
 861void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
 862{
 863	if (epc->init_complete) {
 864		mutex_lock(&epf->lock);
 865		if (epf->event_ops && epf->event_ops->epc_init)
 866			epf->event_ops->epc_init(epf);
 867		mutex_unlock(&epf->lock);
 868	}
 869}
 870EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
 871
 872/**
 873 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 874 * @epc: the EPC device whose deinitialization is completed
 875 *
 876 * Invoke to notify the EPF device that the EPC deinitialization is completed.
 877 */
 878void pci_epc_deinit_notify(struct pci_epc *epc)
 879{
 880	struct pci_epf *epf;
 881
 882	if (IS_ERR_OR_NULL(epc))
 883		return;
 884
 885	mutex_lock(&epc->list_lock);
 886	list_for_each_entry(epf, &epc->pci_epf, list) {
 887		mutex_lock(&epf->lock);
 888		if (epf->event_ops && epf->event_ops->epc_deinit)
 889			epf->event_ops->epc_deinit(epf);
 890		mutex_unlock(&epf->lock);
 891	}
 892	epc->init_complete = false;
 893	mutex_unlock(&epc->list_lock);
 894}
 895EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
 896
 897/**
 898 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 899 *					device has received the Bus Master
 900 *					Enable event from the Root complex
 901 * @epc: the EPC device that received the Bus Master Enable event
 902 *
 903 * Notify the EPF device that the EPC device has generated the Bus Master Enable
 904 * event due to host setting the Bus Master Enable bit in the Command register.
 905 */
 906void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
 907{
 908	struct pci_epf *epf;
 909
 910	if (IS_ERR_OR_NULL(epc))
 911		return;
 912
 913	mutex_lock(&epc->list_lock);
 914	list_for_each_entry(epf, &epc->pci_epf, list) {
 915		mutex_lock(&epf->lock);
 916		if (epf->event_ops && epf->event_ops->bus_master_enable)
 917			epf->event_ops->bus_master_enable(epf);
 918		mutex_unlock(&epf->lock);
 919	}
 920	mutex_unlock(&epc->list_lock);
 921}
 922EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
 923
 924/**
 925 * pci_epc_destroy() - destroy the EPC device
 926 * @epc: the EPC device that has to be destroyed
 927 *
 928 * Invoke to destroy the PCI EPC device
 929 */
 930void pci_epc_destroy(struct pci_epc *epc)
 931{
 932	pci_ep_cfs_remove_epc_group(epc->group);
 933#ifdef CONFIG_PCI_DOMAINS_GENERIC
 934	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
 935#endif
 936	device_unregister(&epc->dev);
 937}
 938EXPORT_SYMBOL_GPL(pci_epc_destroy);
 939
 940/**
 941 * devm_pci_epc_destroy() - destroy the EPC device
 942 * @dev: device that wants to destroy the EPC
 943 * @epc: the EPC device that has to be destroyed
 944 *
 945 * Invoke to destroy the devres associated with this
 946 * pci_epc and destroy the EPC device.
 947 */
 948void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
 949{
 950	int r;
 951
 952	r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
 953			   epc);
 954	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
 955}
 956EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
 957
 958static void pci_epc_release(struct device *dev)
 959{
 960	kfree(to_pci_epc(dev));
 961}
 962
 963/**
 964 * __pci_epc_create() - create a new endpoint controller (EPC) device
 965 * @dev: device that is creating the new EPC
 966 * @ops: function pointers for performing EPC operations
 967 * @owner: the owner of the module that creates the EPC device
 968 *
 969 * Invoke to create a new EPC device and add it to pci_epc class.
 970 */
 971struct pci_epc *
 972__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
 973		 struct module *owner)
 974{
 975	int ret;
 976	struct pci_epc *epc;
 977
 978	if (WARN_ON(!dev)) {
 979		ret = -EINVAL;
 980		goto err_ret;
 981	}
 982
 983	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
 984	if (!epc) {
 985		ret = -ENOMEM;
 986		goto err_ret;
 987	}
 988
 989	mutex_init(&epc->lock);
 990	mutex_init(&epc->list_lock);
 991	INIT_LIST_HEAD(&epc->pci_epf);
 992
 993	device_initialize(&epc->dev);
 994	epc->dev.class = &pci_epc_class;
 995	epc->dev.parent = dev;
 996	epc->dev.release = pci_epc_release;
 997	epc->ops = ops;
 998
 999#ifdef CONFIG_PCI_DOMAINS_GENERIC
1000	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
1001#else
1002	/*
1003	 * TODO: If the architecture doesn't support generic PCI
1004	 * domains, then a custom implementation has to be used.
1005	 */
1006	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
1007#endif
1008
1009	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
1010	if (ret)
1011		goto put_dev;
1012
1013	ret = device_add(&epc->dev);
1014	if (ret)
1015		goto put_dev;
1016
1017	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
1018
1019	return epc;
1020
1021put_dev:
1022	put_device(&epc->dev);
1023
1024err_ret:
1025	return ERR_PTR(ret);
1026}
1027EXPORT_SYMBOL_GPL(__pci_epc_create);
1028
1029/**
1030 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
1031 * @dev: device that is creating the new EPC
1032 * @ops: function pointers for performing EPC operations
1033 * @owner: the owner of the module that creates the EPC device
1034 *
1035 * Invoke to create a new EPC device and add it to pci_epc class.
1036 * While at that, it also associates the device with the pci_epc using devres.
1037 * On driver detach, release function is invoked on the devres data,
1038 * then, devres data is freed.
1039 */
1040struct pci_epc *
1041__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
1042		      struct module *owner)
1043{
1044	struct pci_epc **ptr, *epc;
1045
1046	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
1047	if (!ptr)
1048		return ERR_PTR(-ENOMEM);
1049
1050	epc = __pci_epc_create(dev, ops, owner);
1051	if (!IS_ERR(epc)) {
1052		*ptr = epc;
1053		devres_add(dev, ptr);
1054	} else {
1055		devres_free(ptr);
1056	}
1057
1058	return epc;
1059}
1060EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
1061
1062static int __init pci_epc_init(void)
1063{
1064	return class_register(&pci_epc_class);
1065}
1066module_init(pci_epc_init);
1067
1068static void __exit pci_epc_exit(void)
1069{
1070	class_unregister(&pci_epc_class);
1071}
1072module_exit(pci_epc_exit);
1073
1074MODULE_DESCRIPTION("PCI EPC Library");
1075MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");