drivers/pci/iov.c (v3.5.6)
 
  1/*
  2 * drivers/pci/iov.c
  3 *
  4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
  5 *
  6 * PCI Express I/O Virtualization (IOV) support.
  7 *   Single Root IOV 1.0
  8 *   Address Translation Service 1.0
  9 */
 10
 11#include <linux/pci.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/export.h>
 15#include <linux/string.h>
 16#include <linux/delay.h>
 17#include <linux/pci-ats.h>
 18#include "pci.h"
 19
 20#define VIRTFN_ID_LEN	16
 21
 22static inline u8 virtfn_bus(struct pci_dev *dev, int id)
 23{
 24	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
 25				    dev->sriov->stride * id) >> 8);
 26}
 27
 28static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
 29{
 30	return (dev->devfn + dev->sriov->offset +
 31		dev->sriov->stride * id) & 0xff;
 32}
 33
 34static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
 35{
 36	int rc;
 37	struct pci_bus *child;
 38
 39	if (bus->number == busnr)
 40		return bus;
 41
 42	child = pci_find_bus(pci_domain_nr(bus), busnr);
 43	if (child)
 44		return child;
 45
 46	child = pci_add_new_bus(bus, NULL, busnr);
 47	if (!child)
 48		return NULL;
 49
 50	child->subordinate = busnr;
 51	child->dev.parent = bus->bridge;
 52	rc = pci_bus_add_child(child);
 53	if (rc) {
 54		pci_remove_bus(child);
 55		return NULL;
 56	}
 57
 58	return child;
 59}
 60
 61static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
 62{
 63	struct pci_bus *child;
 64
 65	if (bus->number == busnr)
 66		return;
 67
 68	child = pci_find_bus(pci_domain_nr(bus), busnr);
 69	BUG_ON(!child);
 70
 71	if (list_empty(&child->devices))
 72		pci_remove_bus(child);
 73}
 74
 75static int virtfn_add(struct pci_dev *dev, int id, int reset)
 76{
 77	int i;
 78	int rc;
 79	u64 size;
 80	char buf[VIRTFN_ID_LEN];
 81	struct pci_dev *virtfn;
 82	struct resource *res;
 83	struct pci_sriov *iov = dev->sriov;
 84
 85	virtfn = alloc_pci_dev();
 86	if (!virtfn)
 87		return -ENOMEM;
 88
 89	mutex_lock(&iov->dev->sriov->lock);
 90	virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
 91	if (!virtfn->bus) {
 92		kfree(virtfn);
 93		mutex_unlock(&iov->dev->sriov->lock);
 94		return -ENOMEM;
 95	}
 96	virtfn->devfn = virtfn_devfn(dev, id);
 97	virtfn->vendor = dev->vendor;
 98	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
 99	pci_setup_device(virtfn);
100	virtfn->dev.parent = dev->dev.parent;
101
102	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
103		res = dev->resource + PCI_IOV_RESOURCES + i;
104		if (!res->parent)
105			continue;
106		virtfn->resource[i].name = pci_name(virtfn);
107		virtfn->resource[i].flags = res->flags;
108		size = resource_size(res);
109		do_div(size, iov->total);
110		virtfn->resource[i].start = res->start + size * id;
111		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
112		rc = request_resource(res, &virtfn->resource[i]);
113		BUG_ON(rc);
114	}
115
116	if (reset)
117		__pci_reset_function(virtfn);
118
119	pci_device_add(virtfn, virtfn->bus);
120	mutex_unlock(&iov->dev->sriov->lock);
121
122	virtfn->physfn = pci_dev_get(dev);
123	virtfn->is_virtfn = 1;
124
125	rc = pci_bus_add_device(virtfn);
126	if (rc)
127		goto failed1;
128	sprintf(buf, "virtfn%u", id);
129	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
130	if (rc)
131		goto failed1;
132	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
133	if (rc)
134		goto failed2;
135
136	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
137
138	return 0;
139
140failed2:
141	sysfs_remove_link(&dev->dev.kobj, buf);
142failed1:
143	pci_dev_put(dev);
144	mutex_lock(&iov->dev->sriov->lock);
145	pci_stop_and_remove_bus_device(virtfn);
146	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
147	mutex_unlock(&iov->dev->sriov->lock);
148
149	return rc;
150}
151
152static void virtfn_remove(struct pci_dev *dev, int id, int reset)
153{
154	char buf[VIRTFN_ID_LEN];
155	struct pci_bus *bus;
156	struct pci_dev *virtfn;
157	struct pci_sriov *iov = dev->sriov;
158
159	bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
160	if (!bus)
161		return;
162
163	virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
164	if (!virtfn)
165		return;
166
167	pci_dev_put(virtfn);
168
169	if (reset) {
170		device_release_driver(&virtfn->dev);
171		__pci_reset_function(virtfn);
172	}
173
174	sprintf(buf, "virtfn%u", id);
175	sysfs_remove_link(&dev->dev.kobj, buf);
176	/*
177	 * pci_stop_dev() could have been called for this virtfn already,
178	 * so the directory for the virtfn may have been removed before.
179	 * Double check to avoid spurious sysfs warnings.
180	 */
181	if (virtfn->dev.kobj.sd)
182		sysfs_remove_link(&virtfn->dev.kobj, "physfn");
183
184	mutex_lock(&iov->dev->sriov->lock);
185	pci_stop_and_remove_bus_device(virtfn);
186	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
187	mutex_unlock(&iov->dev->sriov->lock);
188
189	pci_dev_put(dev);
190}
191
192static int sriov_migration(struct pci_dev *dev)
193{
194	u16 status;
195	struct pci_sriov *iov = dev->sriov;
196
197	if (!iov->nr_virtfn)
198		return 0;
199
200	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
201		return 0;
202
203	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
204	if (!(status & PCI_SRIOV_STATUS_VFM))
205		return 0;
206
207	schedule_work(&iov->mtask);
208
209	return 1;
210}
211
212static void sriov_migration_task(struct work_struct *work)
213{
214	int i;
215	u8 state;
216	u16 status;
217	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
218
219	for (i = iov->initial; i < iov->nr_virtfn; i++) {
220		state = readb(iov->mstate + i);
221		if (state == PCI_SRIOV_VFM_MI) {
222			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
223			state = readb(iov->mstate + i);
224			if (state == PCI_SRIOV_VFM_AV)
225				virtfn_add(iov->self, i, 1);
226		} else if (state == PCI_SRIOV_VFM_MO) {
227			virtfn_remove(iov->self, i, 1);
228			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
229			state = readb(iov->mstate + i);
230			if (state == PCI_SRIOV_VFM_AV)
231				virtfn_add(iov->self, i, 0);
232		}
233	}
234
235	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
236	status &= ~PCI_SRIOV_STATUS_VFM;
237	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
238}
239
240static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
241{
242	int bir;
243	u32 table;
244	resource_size_t pa;
245	struct pci_sriov *iov = dev->sriov;
246
247	if (nr_virtfn <= iov->initial)
248		return 0;
249
250	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
251	bir = PCI_SRIOV_VFM_BIR(table);
252	if (bir > PCI_STD_RESOURCE_END)
253		return -EIO;
254
255	table = PCI_SRIOV_VFM_OFFSET(table);
256	if (table + nr_virtfn > pci_resource_len(dev, bir))
257		return -EIO;
258
259	pa = pci_resource_start(dev, bir) + table;
260	iov->mstate = ioremap(pa, nr_virtfn);
261	if (!iov->mstate)
262		return -ENOMEM;
263
264	INIT_WORK(&iov->mtask, sriov_migration_task);
265
266	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
267	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
268
269	return 0;
270}
271
272static void sriov_disable_migration(struct pci_dev *dev)
273{
274	struct pci_sriov *iov = dev->sriov;
275
276	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
277	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
278
279	cancel_work_sync(&iov->mtask);
280	iounmap(iov->mstate);
281}
282
283static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
284{
285	int rc;
286	int i, j;
287	int nres;
288	u16 offset, stride, initial;
289	struct resource *res;
290	struct pci_dev *pdev;
291	struct pci_sriov *iov = dev->sriov;
292	int bars = 0;
293
294	if (!nr_virtfn)
295		return 0;
296
297	if (iov->nr_virtfn)
298		return -EINVAL;
299
300	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
301	if (initial > iov->total ||
302	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
303		return -EIO;
304
305	if (nr_virtfn < 0 || nr_virtfn > iov->total ||
306	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
307		return -EINVAL;
308
309	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
310	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
311	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
312	if (!offset || (nr_virtfn > 1 && !stride))
313		return -EIO;
314
315	nres = 0;
316	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
317		bars |= (1 << (i + PCI_IOV_RESOURCES));
318		res = dev->resource + PCI_IOV_RESOURCES + i;
319		if (res->parent)
320			nres++;
321	}
322	if (nres != iov->nres) {
323		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
324		return -ENOMEM;
325	}
326
327	iov->offset = offset;
328	iov->stride = stride;
329
330	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
331		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
332		return -ENOMEM;
333	}
334
335	if (pci_enable_resources(dev, bars)) {
336		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
337		return -ENOMEM;
338	}
339
340	if (iov->link != dev->devfn) {
341		pdev = pci_get_slot(dev->bus, iov->link);
342		if (!pdev)
343			return -ENODEV;
344
345		pci_dev_put(pdev);
346
347		if (!pdev->is_physfn)
348			return -ENODEV;
349
350		rc = sysfs_create_link(&dev->dev.kobj,
351					&pdev->dev.kobj, "dep_link");
352		if (rc)
353			return rc;
354	}
355
356	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
357	pci_cfg_access_lock(dev);
358	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
359	msleep(100);
360	pci_cfg_access_unlock(dev);
361
362	iov->initial = initial;
363	if (nr_virtfn < initial)
364		initial = nr_virtfn;
365
366	for (i = 0; i < initial; i++) {
367		rc = virtfn_add(dev, i, 0);
368		if (rc)
369			goto failed;
370	}
371
372	if (iov->cap & PCI_SRIOV_CAP_VFM) {
373		rc = sriov_enable_migration(dev, nr_virtfn);
374		if (rc)
375			goto failed;
376	}
377
378	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
379	iov->nr_virtfn = nr_virtfn;
380
381	return 0;
382
383failed:
384	for (j = 0; j < i; j++)
385		virtfn_remove(dev, j, 0);
386
387	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
388	pci_cfg_access_lock(dev);
389	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
390	ssleep(1);
391	pci_cfg_access_unlock(dev);
392
393	if (iov->link != dev->devfn)
394		sysfs_remove_link(&dev->dev.kobj, "dep_link");
395
396	return rc;
397}
398
399static void sriov_disable(struct pci_dev *dev)
400{
401	int i;
402	struct pci_sriov *iov = dev->sriov;
403
404	if (!iov->nr_virtfn)
405		return;
406
407	if (iov->cap & PCI_SRIOV_CAP_VFM)
408		sriov_disable_migration(dev);
409
410	for (i = 0; i < iov->nr_virtfn; i++)
411		virtfn_remove(dev, i, 0);
412
413	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
414	pci_cfg_access_lock(dev);
415	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
416	ssleep(1);
417	pci_cfg_access_unlock(dev);
418
419	if (iov->link != dev->devfn)
420		sysfs_remove_link(&dev->dev.kobj, "dep_link");
421
422	iov->nr_virtfn = 0;
423}
424
425static int sriov_init(struct pci_dev *dev, int pos)
426{
427	int i;
428	int rc;
429	int nres;
430	u32 pgsz;
431	u16 ctrl, total, offset, stride;
432	struct pci_sriov *iov;
433	struct resource *res;
434	struct pci_dev *pdev;
435
436	if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
437	    dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
438		return -ENODEV;
439
440	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
441	if (ctrl & PCI_SRIOV_CTRL_VFE) {
442		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
443		ssleep(1);
444	}
445
446	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
447	if (!total)
448		return 0;
449
450	ctrl = 0;
451	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
452		if (pdev->is_physfn)
453			goto found;
454
455	pdev = NULL;
456	if (pci_ari_enabled(dev->bus))
457		ctrl |= PCI_SRIOV_CTRL_ARI;
458
459found:
460	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
461	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
462	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
463	if (!offset || (total > 1 && !stride))
464		return -EIO;
465
466	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
467	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
468	pgsz &= ~((1 << i) - 1);
469	if (!pgsz)
470		return -EIO;
471
472	pgsz &= ~(pgsz - 1);
473	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
474
475	nres = 0;
476	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
477		res = dev->resource + PCI_IOV_RESOURCES + i;
478		i += __pci_read_base(dev, pci_bar_unknown, res,
479				     pos + PCI_SRIOV_BAR + i * 4);
480		if (!res->flags)
481			continue;
482		if (resource_size(res) & (PAGE_SIZE - 1)) {
483			rc = -EIO;
484			goto failed;
485		}
486		res->end = res->start + resource_size(res) * total - 1;
487		nres++;
488	}
489
490	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
491	if (!iov) {
492		rc = -ENOMEM;
493		goto failed;
494	}
495
496	iov->pos = pos;
497	iov->nres = nres;
498	iov->ctrl = ctrl;
499	iov->total = total;
500	iov->offset = offset;
501	iov->stride = stride;
502	iov->pgsz = pgsz;
503	iov->self = dev;
504	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
505	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
506	if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
507		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
508
509	if (pdev)
510		iov->dev = pci_dev_get(pdev);
511	else
512		iov->dev = dev;
513
514	mutex_init(&iov->lock);
515
516	dev->sriov = iov;
517	dev->is_physfn = 1;
518
519	return 0;
520
521failed:
522	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
523		res = dev->resource + PCI_IOV_RESOURCES + i;
524		res->flags = 0;
525	}
526
527	return rc;
528}
529
530static void sriov_release(struct pci_dev *dev)
531{
532	BUG_ON(dev->sriov->nr_virtfn);
533
534	if (dev != dev->sriov->dev)
535		pci_dev_put(dev->sriov->dev);
536
537	mutex_destroy(&dev->sriov->lock);
538
539	kfree(dev->sriov);
540	dev->sriov = NULL;
541}
542
543static void sriov_restore_state(struct pci_dev *dev)
544{
545	int i;
546	u16 ctrl;
547	struct pci_sriov *iov = dev->sriov;
548
549	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
550	if (ctrl & PCI_SRIOV_CTRL_VFE)
551		return;
552
553	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
554		pci_update_resource(dev, i);
555
556	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
557	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
558	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
559	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
560		msleep(100);
561}
562
563/**
564 * pci_iov_init - initialize the IOV capability
565 * @dev: the PCI device
566 *
567 * Returns 0 on success, or negative on failure.
568 */
569int pci_iov_init(struct pci_dev *dev)
570{
571	int pos;
572
573	if (!pci_is_pcie(dev))
574		return -ENODEV;
575
576	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
577	if (pos)
578		return sriov_init(dev, pos);
579
580	return -ENODEV;
581}
582
583/**
584 * pci_iov_release - release resources used by the IOV capability
585 * @dev: the PCI device
586 */
587void pci_iov_release(struct pci_dev *dev)
588{
589	if (dev->is_physfn)
590		sriov_release(dev);
591}
592
593/**
594 * pci_iov_resource_bar - get position of the SR-IOV BAR
595 * @dev: the PCI device
596 * @resno: the resource number
597 * @type: the BAR type to be filled in
598 *
599 * Returns position of the BAR encapsulated in the SR-IOV capability.
600 */
601int pci_iov_resource_bar(struct pci_dev *dev, int resno,
602			 enum pci_bar_type *type)
603{
604	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
605		return 0;
606
607	BUG_ON(!dev->is_physfn);
608
609	*type = pci_bar_unknown;
610
611	return dev->sriov->pos + PCI_SRIOV_BAR +
612		4 * (resno - PCI_IOV_RESOURCES);
613}
614
615/**
616 * pci_sriov_resource_alignment - get resource alignment for VF BAR
617 * @dev: the PCI device
618 * @resno: the resource number
619 *
620 * Returns the alignment of the VF BAR found in the SR-IOV capability.
621 * This is not the same as the resource size which is defined as
622 * the VF BAR size multiplied by the number of VFs.  The alignment
623 * is just the VF BAR size.
624 */
625resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
626{
627	struct resource tmp;
628	enum pci_bar_type type;
629	int reg = pci_iov_resource_bar(dev, resno, &type);
630	
631	if (!reg)
632		return 0;
633
634	 __pci_read_base(dev, type, &tmp, reg);
635	return resource_alignment(&tmp);
636}
637
638/**
639 * pci_restore_iov_state - restore the state of the IOV capability
640 * @dev: the PCI device
641 */
642void pci_restore_iov_state(struct pci_dev *dev)
643{
644	if (dev->is_physfn)
645		sriov_restore_state(dev);
646}
647
648/**
649 * pci_iov_bus_range - find bus range used by Virtual Function
650 * @bus: the PCI bus
651 *
 652 * Returns max number of buses (excluding the current one) used by Virtual
 653 * Functions.
654 */
655int pci_iov_bus_range(struct pci_bus *bus)
656{
657	int max = 0;
658	u8 busnr;
659	struct pci_dev *dev;
660
661	list_for_each_entry(dev, &bus->devices, bus_list) {
662		if (!dev->is_physfn)
663			continue;
664		busnr = virtfn_bus(dev, dev->sriov->total - 1);
665		if (busnr > max)
666			max = busnr;
667	}
668
669	return max ? max - bus->number : 0;
670}
671
672/**
673 * pci_enable_sriov - enable the SR-IOV capability
674 * @dev: the PCI device
675 * @nr_virtfn: number of virtual functions to enable
676 *
677 * Returns 0 on success, or negative on failure.
678 */
679int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
680{
681	might_sleep();
682
683	if (!dev->is_physfn)
684		return -ENODEV;
685
686	return sriov_enable(dev, nr_virtfn);
687}
688EXPORT_SYMBOL_GPL(pci_enable_sriov);
689
690/**
691 * pci_disable_sriov - disable the SR-IOV capability
692 * @dev: the PCI device
693 */
694void pci_disable_sriov(struct pci_dev *dev)
695{
696	might_sleep();
697
698	if (!dev->is_physfn)
699		return;
700
701	sriov_disable(dev);
702}
703EXPORT_SYMBOL_GPL(pci_disable_sriov);
704
705/**
706 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
707 * @dev: the PCI device
708 *
709 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
710 *
711 * The Physical Function driver is responsible for registering an IRQ
712 * handler using the VF Migration Interrupt Message Number, and for
713 * calling this function when the interrupt is generated by the hardware.
714 */
715irqreturn_t pci_sriov_migration(struct pci_dev *dev)
716{
717	if (!dev->is_physfn)
718		return IRQ_NONE;
719
720	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
721}
722EXPORT_SYMBOL_GPL(pci_sriov_migration);
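
A minimal sketch of the usage described in the comment above: a v3.5-era Physical Function driver registers an interrupt handler for the VF Migration Interrupt Message Number and forwards it to pci_sriov_migration(). The handler name, the vfm_irq vector, and the probe-time snippet are illustrative assumptions, not code from this file.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_pf_vfm_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;	/* the Physical Function */

	/* let the SR-IOV core schedule its migration work, if any */
	return pci_sriov_migration(pdev);
}

/*
 * In the PF driver's probe path, after SR-IOV has been enabled:
 *
 *	rc = request_irq(vfm_irq, my_pf_vfm_handler, 0, "my-pf-vfm", pdev);
 */
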
723
724/**
725 * pci_num_vf - return number of VFs associated with a PF
726 * @dev: the PCI device
727 *
728 * Returns number of VFs, or 0 if SR-IOV is not enabled.
729 */
730int pci_num_vf(struct pci_dev *dev)
731{
732	if (!dev || !dev->is_physfn)
733		return 0;
734	else
735		return dev->sriov->nr_virtfn;
736}
737EXPORT_SYMBOL_GPL(pci_num_vf);
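
For context, a minimal sketch of how a Physical Function driver typically consumes pci_enable_sriov() and pci_disable_sriov(), exported above. The probe/remove pairing, the device names, and the VF count of 4 are illustrative assumptions, not code from iov.c.

#include <linux/pci.h>

static int my_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* ask the SR-IOV core to instantiate 4 Virtual Functions */
	rc = pci_enable_sriov(pdev, 4);
	if (rc)
		dev_warn(&pdev->dev, "enabling VFs failed: %d\n", rc);

	return 0;
}

static void my_pf_remove(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);	/* tears down every VF added above */
	pci_disable_device(pdev);
}
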
drivers/pci/iov.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * PCI Express I/O Virtualization (IOV) support
  4 *   Single Root IOV 1.0
  5 *   Address Translation Service 1.0
  6 *
  7 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
  8 */
  9
 10#include <linux/pci.h>
 11#include <linux/slab.h>
 12#include <linux/mutex.h>
 13#include <linux/export.h>
 14#include <linux/string.h>
 15#include <linux/delay.h>
 16#include <linux/pci-ats.h>
 17#include "pci.h"
 18
 19#define VIRTFN_ID_LEN	16
 20
 21int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
 22{
 23	if (!dev->is_physfn)
 24		return -EINVAL;
 25	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
 26				    dev->sriov->stride * vf_id) >> 8);
 27}
 28
 29int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
 30{
 31	if (!dev->is_physfn)
 32		return -EINVAL;
 33	return (dev->devfn + dev->sriov->offset +
 34		dev->sriov->stride * vf_id) & 0xff;
 35}
 36
 37/*
 38 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
 39 * change when NumVFs changes.
 40 *
 41 * Update iov->offset and iov->stride when NumVFs is written.
 42 */
 43static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
 44{
 45	struct pci_sriov *iov = dev->sriov;
 46
 47	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
 48	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
 49	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
 50}
 51
 52/*
 53 * The PF consumes one bus number.  NumVFs, First VF Offset, and VF Stride
 54 * determine how many additional bus numbers will be consumed by VFs.
 55 *
 56 * Iterate over all valid NumVFs, validate offset and stride, and calculate
 57 * the maximum number of bus numbers that could ever be required.
 58 */
 59static int compute_max_vf_buses(struct pci_dev *dev)
 60{
 61	struct pci_sriov *iov = dev->sriov;
 62	int nr_virtfn, busnr, rc = 0;
 63
 64	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
 65		pci_iov_set_numvfs(dev, nr_virtfn);
 66		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
 67			rc = -EIO;
 68			goto out;
 69		}
 70
 71		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
 72		if (busnr > iov->max_VF_buses)
 73			iov->max_VF_buses = busnr;
 74	}
 75
 76out:
 77	pci_iov_set_numvfs(dev, 0);
 78	return rc;
 79}
 80
 81static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
 82{
 83	struct pci_bus *child;
 84
 85	if (bus->number == busnr)
 86		return bus;
 87
 88	child = pci_find_bus(pci_domain_nr(bus), busnr);
 89	if (child)
 90		return child;
 91
 92	child = pci_add_new_bus(bus, NULL, busnr);
 93	if (!child)
 94		return NULL;
 95
 96	pci_bus_insert_busn_res(child, busnr, busnr);
 97
 98	return child;
 99}
100
101static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
102{
103	if (physbus != virtbus && list_empty(&virtbus->devices))
104		pci_remove_bus(virtbus);
105}
106
107resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
108{
109	if (!dev->is_physfn)
110		return 0;
111
112	return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
113}
114
115static void pci_read_vf_config_common(struct pci_dev *virtfn)
116{
117	struct pci_dev *physfn = virtfn->physfn;
118
119	/*
120	 * Some config registers are the same across all associated VFs.
121	 * Read them once from VF0 so we can skip reading them from the
122	 * other VFs.
123	 *
124	 * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
125	 * have the same Revision ID and Subsystem ID, but we assume they
126	 * do.
127	 */
128	pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
129			      &physfn->sriov->class);
130	pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
131			     &physfn->sriov->hdr_type);
132	pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
133			     &physfn->sriov->subsystem_vendor);
134	pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
135			     &physfn->sriov->subsystem_device);
136}
137
138int pci_iov_add_virtfn(struct pci_dev *dev, int id)
139{
140	int i;
141	int rc = -ENOMEM;
142	u64 size;
143	char buf[VIRTFN_ID_LEN];
144	struct pci_dev *virtfn;
145	struct resource *res;
146	struct pci_sriov *iov = dev->sriov;
147	struct pci_bus *bus;
148
149	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
150	if (!bus)
151		goto failed;
152
153	virtfn = pci_alloc_dev(bus);
154	if (!virtfn)
155		goto failed0;
156
157	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
158	virtfn->vendor = dev->vendor;
159	virtfn->device = iov->vf_device;
160	virtfn->is_virtfn = 1;
161	virtfn->physfn = pci_dev_get(dev);
162
163	if (id == 0)
164		pci_read_vf_config_common(virtfn);
165
166	rc = pci_setup_device(virtfn);
167	if (rc)
168		goto failed1;
169
170	virtfn->dev.parent = dev->dev.parent;
171	virtfn->multifunction = 0;
172
173	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
174		res = &dev->resource[i + PCI_IOV_RESOURCES];
175		if (!res->parent)
176			continue;
177		virtfn->resource[i].name = pci_name(virtfn);
178		virtfn->resource[i].flags = res->flags;
179		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
180		virtfn->resource[i].start = res->start + size * id;
181		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
182		rc = request_resource(res, &virtfn->resource[i]);
183		BUG_ON(rc);
184	}
185
186	pci_device_add(virtfn, virtfn->bus);
187
188	sprintf(buf, "virtfn%u", id);
189	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
190	if (rc)
191		goto failed2;
192	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
193	if (rc)
194		goto failed3;
195
196	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
197
198	pci_bus_add_device(virtfn);
199
200	return 0;
201
202failed3:
203	sysfs_remove_link(&dev->dev.kobj, buf);
204failed2:
205	pci_stop_and_remove_bus_device(virtfn);
206failed1:
207	pci_dev_put(dev);
208failed0:
209	virtfn_remove_bus(dev->bus, bus);
210failed:
211
212	return rc;
213}
214
215void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
216{
217	char buf[VIRTFN_ID_LEN];
218	struct pci_dev *virtfn;
219
220	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
221					     pci_iov_virtfn_bus(dev, id),
222					     pci_iov_virtfn_devfn(dev, id));
223	if (!virtfn)
224		return;
225
226	sprintf(buf, "virtfn%u", id);
227	sysfs_remove_link(&dev->dev.kobj, buf);
228	/*
229	 * pci_stop_dev() could have been called for this virtfn already,
230	 * so the directory for the virtfn may have been removed before.
231	 * Double check to avoid spurious sysfs warnings.
232	 */
233	if (virtfn->dev.kobj.sd)
234		sysfs_remove_link(&virtfn->dev.kobj, "physfn");
235
236	pci_stop_and_remove_bus_device(virtfn);
237	virtfn_remove_bus(dev->bus, virtfn->bus);
238
239	/* balance pci_get_domain_bus_and_slot() */
240	pci_dev_put(virtfn);
241	pci_dev_put(dev);
242}
243
244int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
245{
246	return 0;
247}
248
249int __weak pcibios_sriov_disable(struct pci_dev *pdev)
250{
251	return 0;
252}
253
254static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
255{
256	int rc;
257	int i;
258	int nres;
259	u16 initial;
260	struct resource *res;
261	struct pci_dev *pdev;
262	struct pci_sriov *iov = dev->sriov;
263	int bars = 0;
264	int bus;
265
266	if (!nr_virtfn)
267		return 0;
268
269	if (iov->num_VFs)
270		return -EINVAL;
271
272	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
273	if (initial > iov->total_VFs ||
274	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
275		return -EIO;
276
277	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
278	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
279		return -EINVAL;
280
281	nres = 0;
282	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
283		bars |= (1 << (i + PCI_IOV_RESOURCES));
284		res = &dev->resource[i + PCI_IOV_RESOURCES];
285		if (res->parent)
286			nres++;
287	}
288	if (nres != iov->nres) {
289		pci_err(dev, "not enough MMIO resources for SR-IOV\n");
290		return -ENOMEM;
291	}
292
293	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
294	if (bus > dev->bus->busn_res.end) {
295		pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
296			nr_virtfn, bus, &dev->bus->busn_res);
297		return -ENOMEM;
298	}
299
300	if (pci_enable_resources(dev, bars)) {
301		pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
302		return -ENOMEM;
303	}
304
305	if (iov->link != dev->devfn) {
306		pdev = pci_get_slot(dev->bus, iov->link);
307		if (!pdev)
308			return -ENODEV;
309
310		if (!pdev->is_physfn) {
311			pci_dev_put(pdev);
312			return -ENOSYS;
313		}
314
315		rc = sysfs_create_link(&dev->dev.kobj,
316					&pdev->dev.kobj, "dep_link");
317		pci_dev_put(pdev);
318		if (rc)
319			return rc;
320	}
321
322	iov->initial_VFs = initial;
323	if (nr_virtfn < initial)
324		initial = nr_virtfn;
325
326	rc = pcibios_sriov_enable(dev, initial);
327	if (rc) {
328		pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
329		goto err_pcibios;
330	}
331
332	pci_iov_set_numvfs(dev, nr_virtfn);
333	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
334	pci_cfg_access_lock(dev);
335	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
336	msleep(100);
337	pci_cfg_access_unlock(dev);
338
339	for (i = 0; i < initial; i++) {
340		rc = pci_iov_add_virtfn(dev, i);
341		if (rc)
342			goto failed;
343	}
344
345	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
346	iov->num_VFs = nr_virtfn;
347
348	return 0;
349
350failed:
351	while (i--)
352		pci_iov_remove_virtfn(dev, i);
353
354err_pcibios:
355	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
356	pci_cfg_access_lock(dev);
357	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
358	ssleep(1);
359	pci_cfg_access_unlock(dev);
360
361	pcibios_sriov_disable(dev);
362
363	if (iov->link != dev->devfn)
364		sysfs_remove_link(&dev->dev.kobj, "dep_link");
365
366	pci_iov_set_numvfs(dev, 0);
367	return rc;
368}
369
370static void sriov_disable(struct pci_dev *dev)
371{
372	int i;
373	struct pci_sriov *iov = dev->sriov;
374
375	if (!iov->num_VFs)
376		return;
377
378	for (i = 0; i < iov->num_VFs; i++)
379		pci_iov_remove_virtfn(dev, i);
380
381	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
382	pci_cfg_access_lock(dev);
383	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
384	ssleep(1);
385	pci_cfg_access_unlock(dev);
386
387	pcibios_sriov_disable(dev);
388
389	if (iov->link != dev->devfn)
390		sysfs_remove_link(&dev->dev.kobj, "dep_link");
391
392	iov->num_VFs = 0;
393	pci_iov_set_numvfs(dev, 0);
394}
395
396static int sriov_init(struct pci_dev *dev, int pos)
397{
398	int i, bar64;
399	int rc;
400	int nres;
401	u32 pgsz;
402	u16 ctrl, total;
403	struct pci_sriov *iov;
404	struct resource *res;
405	struct pci_dev *pdev;
406
407	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
408	if (ctrl & PCI_SRIOV_CTRL_VFE) {
409		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
410		ssleep(1);
411	}
412
413	ctrl = 0;
414	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
415		if (pdev->is_physfn)
416			goto found;
417
418	pdev = NULL;
419	if (pci_ari_enabled(dev->bus))
420		ctrl |= PCI_SRIOV_CTRL_ARI;
421
422found:
423	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
424
425	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
426	if (!total)
427		return 0;
428
429	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
430	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
431	pgsz &= ~((1 << i) - 1);
432	if (!pgsz)
433		return -EIO;
434
435	pgsz &= ~(pgsz - 1);
436	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
437
438	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
439	if (!iov)
440		return -ENOMEM;
441
442	nres = 0;
443	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
444		res = &dev->resource[i + PCI_IOV_RESOURCES];
445		/*
446		 * If it is already FIXED, don't change it, something
447		 * (perhaps EA or header fixups) wants it this way.
448		 */
449		if (res->flags & IORESOURCE_PCI_FIXED)
450			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
451		else
452			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
453						pos + PCI_SRIOV_BAR + i * 4);
454		if (!res->flags)
455			continue;
456		if (resource_size(res) & (PAGE_SIZE - 1)) {
457			rc = -EIO;
458			goto failed;
459		}
460		iov->barsz[i] = resource_size(res);
461		res->end = res->start + resource_size(res) * total - 1;
462		pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
463			 i, res, i, total);
464		i += bar64;
465		nres++;
466	}
467
468	iov->pos = pos;
469	iov->nres = nres;
470	iov->ctrl = ctrl;
471	iov->total_VFs = total;
472	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
473	iov->pgsz = pgsz;
474	iov->self = dev;
475	iov->drivers_autoprobe = true;
476	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
477	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
478	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
479		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
480
481	if (pdev)
482		iov->dev = pci_dev_get(pdev);
483	else
484		iov->dev = dev;
485
486	dev->sriov = iov;
487	dev->is_physfn = 1;
488	rc = compute_max_vf_buses(dev);
489	if (rc)
490		goto fail_max_buses;
491
492	return 0;
493
494fail_max_buses:
495	dev->sriov = NULL;
496	dev->is_physfn = 0;
497failed:
498	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
499		res = &dev->resource[i + PCI_IOV_RESOURCES];
500		res->flags = 0;
501	}
502
503	kfree(iov);
504	return rc;
505}
506
507static void sriov_release(struct pci_dev *dev)
508{
509	BUG_ON(dev->sriov->num_VFs);
510
511	if (dev != dev->sriov->dev)
512		pci_dev_put(dev->sriov->dev);
513
514	kfree(dev->sriov);
515	dev->sriov = NULL;
516}
517
518static void sriov_restore_state(struct pci_dev *dev)
519{
520	int i;
521	u16 ctrl;
522	struct pci_sriov *iov = dev->sriov;
523
524	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
525	if (ctrl & PCI_SRIOV_CTRL_VFE)
526		return;
527
528	/*
529	 * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
530	 * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
531	 */
532	ctrl &= ~PCI_SRIOV_CTRL_ARI;
533	ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
534	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
535
536	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
537		pci_update_resource(dev, i);
538
539	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
540	pci_iov_set_numvfs(dev, iov->num_VFs);
541	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
542	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
543		msleep(100);
544}
545
546/**
547 * pci_iov_init - initialize the IOV capability
548 * @dev: the PCI device
549 *
550 * Returns 0 on success, or negative on failure.
551 */
552int pci_iov_init(struct pci_dev *dev)
553{
554	int pos;
555
556	if (!pci_is_pcie(dev))
557		return -ENODEV;
558
559	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
560	if (pos)
561		return sriov_init(dev, pos);
562
563	return -ENODEV;
564}
565
566/**
567 * pci_iov_release - release resources used by the IOV capability
568 * @dev: the PCI device
569 */
570void pci_iov_release(struct pci_dev *dev)
571{
572	if (dev->is_physfn)
573		sriov_release(dev);
574}
575
576/**
577 * pci_iov_update_resource - update a VF BAR
578 * @dev: the PCI device
579 * @resno: the resource number
580 *
581 * Update a VF BAR in the SR-IOV capability of a PF.
582 */
583void pci_iov_update_resource(struct pci_dev *dev, int resno)
584{
585	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
586	struct resource *res = dev->resource + resno;
587	int vf_bar = resno - PCI_IOV_RESOURCES;
588	struct pci_bus_region region;
589	u16 cmd;
590	u32 new;
591	int reg;
592
593	/*
594	 * The generic pci_restore_bars() path calls this for all devices,
595	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
596	 * have nothing to do.
597	 */
598	if (!iov)
599		return;
600
601	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
602	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
603		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
604			 vf_bar, res);
605		return;
606	}
607
608	/*
609	 * Ignore unimplemented BARs, unused resource slots for 64-bit
610	 * BARs, and non-movable resources, e.g., those described via
611	 * Enhanced Allocation.
612	 */
613	if (!res->flags)
614		return;
615
616	if (res->flags & IORESOURCE_UNSET)
617		return;
618
619	if (res->flags & IORESOURCE_PCI_FIXED)
620		return;
621
622	pcibios_resource_to_bus(dev->bus, &region, res);
623	new = region.start;
624	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
625
626	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
627	pci_write_config_dword(dev, reg, new);
628	if (res->flags & IORESOURCE_MEM_64) {
629		new = region.start >> 16 >> 16;
630		pci_write_config_dword(dev, reg + 4, new);
631	}
632}
633
634resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
635						      int resno)
636{
637	return pci_iov_resource_size(dev, resno);
638}
639
640/**
641 * pci_sriov_resource_alignment - get resource alignment for VF BAR
642 * @dev: the PCI device
643 * @resno: the resource number
644 *
645 * Returns the alignment of the VF BAR found in the SR-IOV capability.
646 * This is not the same as the resource size which is defined as
647 * the VF BAR size multiplied by the number of VFs.  The alignment
648 * is just the VF BAR size.
649 */
650resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
651{
652	return pcibios_iov_resource_alignment(dev, resno);
653}
654
655/**
656 * pci_restore_iov_state - restore the state of the IOV capability
657 * @dev: the PCI device
658 */
659void pci_restore_iov_state(struct pci_dev *dev)
660{
661	if (dev->is_physfn)
662		sriov_restore_state(dev);
663}
664
665/**
666 * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
667 * @dev: the PCI device
668 * @auto_probe: set VF drivers auto probe flag
669 */
670void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
671{
672	if (dev->is_physfn)
673		dev->sriov->drivers_autoprobe = auto_probe;
674}
675
676/**
677 * pci_iov_bus_range - find bus range used by Virtual Function
678 * @bus: the PCI bus
679 *
680 * Returns max number of buses (excluding the current one) used by Virtual
681 * Functions.
682 */
683int pci_iov_bus_range(struct pci_bus *bus)
684{
685	int max = 0;
686	struct pci_dev *dev;
687
688	list_for_each_entry(dev, &bus->devices, bus_list) {
689		if (!dev->is_physfn)
690			continue;
691		if (dev->sriov->max_VF_buses > max)
692			max = dev->sriov->max_VF_buses;
693	}
694
695	return max ? max - bus->number : 0;
696}
697
698/**
699 * pci_enable_sriov - enable the SR-IOV capability
700 * @dev: the PCI device
701 * @nr_virtfn: number of virtual functions to enable
702 *
703 * Returns 0 on success, or negative on failure.
704 */
705int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
706{
707	might_sleep();
708
709	if (!dev->is_physfn)
710		return -ENOSYS;
711
712	return sriov_enable(dev, nr_virtfn);
713}
714EXPORT_SYMBOL_GPL(pci_enable_sriov);
715
716/**
717 * pci_disable_sriov - disable the SR-IOV capability
718 * @dev: the PCI device
719 */
720void pci_disable_sriov(struct pci_dev *dev)
721{
722	might_sleep();
723
724	if (!dev->is_physfn)
725		return;
726
727	sriov_disable(dev);
728}
729EXPORT_SYMBOL_GPL(pci_disable_sriov);
730
731/**
732 * pci_num_vf - return number of VFs associated with a PF
733 * @dev: the PCI device
734 *
735 * Returns number of VFs, or 0 if SR-IOV is not enabled.
736 */
737int pci_num_vf(struct pci_dev *dev)
738{
739	if (!dev->is_physfn)
740		return 0;
741
742	return dev->sriov->num_VFs;
743}
744EXPORT_SYMBOL_GPL(pci_num_vf);
745
746/**
747 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
748 * @dev: the PCI device
749 *
750 * Returns number of VFs belonging to this device that are assigned to a guest.
751 * If device is not a physical function returns 0.
752 */
753int pci_vfs_assigned(struct pci_dev *dev)
754{
755	struct pci_dev *vfdev;
756	unsigned int vfs_assigned = 0;
757	unsigned short dev_id;
758
759	/* only search if we are a PF */
760	if (!dev->is_physfn)
761		return 0;
762
763	/*
764	 * determine the device ID for the VFs, the vendor ID will be the
765	 * same as the PF so there is no need to check for that one
766	 */
767	dev_id = dev->sriov->vf_device;
768
769	/* loop through all the VFs to see if we own any that are assigned */
770	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
771	while (vfdev) {
772		/*
773		 * It is considered assigned if it is a virtual function with
774		 * our dev as the physical function and the assigned bit is set
775		 */
776		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
777			pci_is_dev_assigned(vfdev))
778			vfs_assigned++;
779
780		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
781	}
782
783	return vfs_assigned;
784}
785EXPORT_SYMBOL_GPL(pci_vfs_assigned);
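
A common pattern, sketched here under assumptions rather than taken from this file, is to check pci_vfs_assigned() from a driver's sriov_configure() callback before tearing SR-IOV down, so VFs a guest is still using are not removed underneath it. The callback name and return convention follow struct pci_driver's sriov_configure hook (return the number of VFs enabled, or a negative errno).

#include <linux/pci.h>

static int my_pf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int rc;

	if (num_vfs) {
		rc = pci_enable_sriov(pdev, num_vfs);
		return rc ? rc : num_vfs;	/* report how many VFs were enabled */
	}

	/* refuse to disable SR-IOV while any VF is assigned to a guest */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev, "VFs still assigned, not disabling SR-IOV\n");
		return -EBUSY;
	}

	pci_disable_sriov(pdev);
	return 0;
}
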
786
787/**
788 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
789 * @dev: the PCI PF device
790 * @numvfs: number that should be used for TotalVFs supported
791 *
792 * Should be called from PF driver's probe routine with
793 * device's mutex held.
794 *
795 * Returns 0 if PF is an SRIOV-capable device and the value of numvfs
796 * is valid.  If not a PF, return -ENOSYS;
797 * if numvfs is invalid, return -EINVAL;
798 * if VFs are already enabled, return -EBUSY.
799 */
800int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
801{
802	if (!dev->is_physfn)
803		return -ENOSYS;
804	if (numvfs > dev->sriov->total_VFs)
805		return -EINVAL;
806
807	/* Shouldn't change if VFs already enabled */
808	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
809		return -EBUSY;
810	else
811		dev->sriov->driver_max_VFs = numvfs;
812
813	return 0;
814}
815EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
816
817/**
818 * pci_sriov_get_totalvfs -- get total VFs supported on this device
819 * @dev: the PCI PF device
820 *
821 * For a PCIe device with SRIOV support, return the PCIe
822 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
823 * if the driver reduced it.  Otherwise 0.
824 */
825int pci_sriov_get_totalvfs(struct pci_dev *dev)
826{
827	if (!dev->is_physfn)
828		return 0;
829
830	if (dev->sriov->driver_max_VFs)
831		return dev->sriov->driver_max_VFs;
832
833	return dev->sriov->total_VFs;
834}
835EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
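
As the comment above notes, pci_sriov_set_totalvfs() is meant to be called from the PF driver's probe routine. A hedged sketch of that usage follows; MY_FW_MAX_VFS and the probe function are illustrative assumptions standing in for a device-specific limit and driver.

#include <linux/pci.h>

#define MY_FW_MAX_VFS	8	/* assumed firmware/driver limit */

static int my_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	/* clamp the TotalVFs value exposed through sysfs to what we support */
	rc = pci_sriov_set_totalvfs(pdev, MY_FW_MAX_VFS);
	if (rc)
		return rc;

	dev_info(&pdev->dev, "up to %d VFs can be enabled\n",
		 pci_sriov_get_totalvfs(pdev));
	return 0;
}
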