v3.1
 
  1/*
  2 * drivers/pci/iov.c
  3 *
  4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
  5 *
  6 * PCI Express I/O Virtualization (IOV) support.
  7 *   Single Root IOV 1.0
  8 *   Address Translation Service 1.0
  9 */
 10
 11#include <linux/pci.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/string.h>
 15#include <linux/delay.h>
 16#include <linux/pci-ats.h>
 17#include "pci.h"
 18
 19#define VIRTFN_ID_LEN	16
 20
 21static inline u8 virtfn_bus(struct pci_dev *dev, int id)
 22{
 23	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
 24				    dev->sriov->stride * id) >> 8);
 25}
 26
 27static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
 28{
 29	return (dev->devfn + dev->sriov->offset +
 30		dev->sriov->stride * id) & 0xff;
 31}
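/*
 * Worked example (hypothetical values): for a PF at devfn 0x00 with
 * First VF Offset 1 and VF Stride 1, VF id 7 maps to
 * devfn = (0x00 + 1 + 1 * 7) & 0xff = 0x08 (device 1, function 0),
 * while the bus term ((0x00 + 1 + 1 * 7) >> 8) is 0, so that VF stays
 * on the PF's bus.  A larger offset or stride can push the sum past
 * 0xff, which is how VFs spill onto the bus numbers behind the PF.
 */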
 32
 33static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
 34{
 35	int rc;
 36	struct pci_bus *child;
 37
 38	if (bus->number == busnr)
 39		return bus;
 40
 41	child = pci_find_bus(pci_domain_nr(bus), busnr);
 42	if (child)
 43		return child;
 44
 45	child = pci_add_new_bus(bus, NULL, busnr);
 46	if (!child)
 47		return NULL;
 48
 49	child->subordinate = busnr;
 50	child->dev.parent = bus->bridge;
 51	rc = pci_bus_add_child(child);
 52	if (rc) {
 53		pci_remove_bus(child);
 54		return NULL;
 55	}
 56
 57	return child;
 58}
 59
 60static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
 61{
 62	struct pci_bus *child;
 63
 64	if (bus->number == busnr)
 65		return;
 66
 67	child = pci_find_bus(pci_domain_nr(bus), busnr);
 68	BUG_ON(!child);
 69
 70	if (list_empty(&child->devices))
 71		pci_remove_bus(child);
 72}
 73
 74static int virtfn_add(struct pci_dev *dev, int id, int reset)
 75{
 76	int i;
 77	int rc;
 78	u64 size;
 79	char buf[VIRTFN_ID_LEN];
 80	struct pci_dev *virtfn;
 81	struct resource *res;
 82	struct pci_sriov *iov = dev->sriov;
 83
 84	virtfn = alloc_pci_dev();
 85	if (!virtfn)
 86		return -ENOMEM;
 87
 88	mutex_lock(&iov->dev->sriov->lock);
 89	virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
 90	if (!virtfn->bus) {
 91		kfree(virtfn);
 92		mutex_unlock(&iov->dev->sriov->lock);
 93		return -ENOMEM;
 94	}
 95	virtfn->devfn = virtfn_devfn(dev, id);
 96	virtfn->vendor = dev->vendor;
 97	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
 98	pci_setup_device(virtfn);
 99	virtfn->dev.parent = dev->dev.parent;
100
101	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
102		res = dev->resource + PCI_IOV_RESOURCES + i;
103		if (!res->parent)
104			continue;
105		virtfn->resource[i].name = pci_name(virtfn);
106		virtfn->resource[i].flags = res->flags;
107		size = resource_size(res);
108		do_div(size, iov->total);
109		virtfn->resource[i].start = res->start + size * id;
110		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
111		rc = request_resource(res, &virtfn->resource[i]);
112		BUG_ON(rc);
113	}
114
115	if (reset)
116		__pci_reset_function(virtfn);
117
118	pci_device_add(virtfn, virtfn->bus);
119	mutex_unlock(&iov->dev->sriov->lock);
120
121	virtfn->physfn = pci_dev_get(dev);
122	virtfn->is_virtfn = 1;
123
124	rc = pci_bus_add_device(virtfn);
125	if (rc)
126		goto failed1;
127	sprintf(buf, "virtfn%u", id);
128	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
129	if (rc)
130		goto failed1;
131	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
132	if (rc)
133		goto failed2;
134
135	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
136
137	return 0;
138
139failed2:
140	sysfs_remove_link(&dev->dev.kobj, buf);
141failed1:
142	pci_dev_put(dev);
143	mutex_lock(&iov->dev->sriov->lock);
144	pci_remove_bus_device(virtfn);
145	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
146	mutex_unlock(&iov->dev->sriov->lock);
147
148	return rc;
149}
150
151static void virtfn_remove(struct pci_dev *dev, int id, int reset)
152{
153	char buf[VIRTFN_ID_LEN];
154	struct pci_bus *bus;
155	struct pci_dev *virtfn;
156	struct pci_sriov *iov = dev->sriov;
157
158	bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
159	if (!bus)
160		return;
161
162	virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
163	if (!virtfn)
164		return;
165
166	pci_dev_put(virtfn);
167
168	if (reset) {
169		device_release_driver(&virtfn->dev);
170		__pci_reset_function(virtfn);
171	}
172
173	sprintf(buf, "virtfn%u", id);
174	sysfs_remove_link(&dev->dev.kobj, buf);
175	sysfs_remove_link(&virtfn->dev.kobj, "physfn");
176
177	mutex_lock(&iov->dev->sriov->lock);
178	pci_remove_bus_device(virtfn);
179	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
180	mutex_unlock(&iov->dev->sriov->lock);
181
182	pci_dev_put(dev);
183}
184
185static int sriov_migration(struct pci_dev *dev)
186{
187	u16 status;
188	struct pci_sriov *iov = dev->sriov;
189
190	if (!iov->nr_virtfn)
191		return 0;
192
193	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
194		return 0;
195
196	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
197	if (!(status & PCI_SRIOV_STATUS_VFM))
198		return 0;
199
200	schedule_work(&iov->mtask);
201
202	return 1;
203}
204
205static void sriov_migration_task(struct work_struct *work)
206{
207	int i;
208	u8 state;
209	u16 status;
210	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
211
212	for (i = iov->initial; i < iov->nr_virtfn; i++) {
213		state = readb(iov->mstate + i);
214		if (state == PCI_SRIOV_VFM_MI) {
215			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
216			state = readb(iov->mstate + i);
217			if (state == PCI_SRIOV_VFM_AV)
218				virtfn_add(iov->self, i, 1);
219		} else if (state == PCI_SRIOV_VFM_MO) {
220			virtfn_remove(iov->self, i, 1);
221			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
222			state = readb(iov->mstate + i);
223			if (state == PCI_SRIOV_VFM_AV)
224				virtfn_add(iov->self, i, 0);
225		}
226	}
227
228	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
229	status &= ~PCI_SRIOV_STATUS_VFM;
230	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
231}
232
233static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
234{
235	int bir;
236	u32 table;
237	resource_size_t pa;
238	struct pci_sriov *iov = dev->sriov;
239
240	if (nr_virtfn <= iov->initial)
241		return 0;
242
243	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
244	bir = PCI_SRIOV_VFM_BIR(table);
245	if (bir > PCI_STD_RESOURCE_END)
246		return -EIO;
247
248	table = PCI_SRIOV_VFM_OFFSET(table);
249	if (table + nr_virtfn > pci_resource_len(dev, bir))
250		return -EIO;
251
252	pa = pci_resource_start(dev, bir) + table;
253	iov->mstate = ioremap(pa, nr_virtfn);
254	if (!iov->mstate)
255		return -ENOMEM;
256
257	INIT_WORK(&iov->mtask, sriov_migration_task);
258
259	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
260	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
261
262	return 0;
263}
264
265static void sriov_disable_migration(struct pci_dev *dev)
266{
267	struct pci_sriov *iov = dev->sriov;
268
269	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
270	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
271
272	cancel_work_sync(&iov->mtask);
273	iounmap(iov->mstate);
274}
275
276static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
277{
278	int rc;
279	int i, j;
280	int nres;
281	u16 offset, stride, initial;
282	struct resource *res;
283	struct pci_dev *pdev;
284	struct pci_sriov *iov = dev->sriov;
285
286	if (!nr_virtfn)
287		return 0;
288
289	if (iov->nr_virtfn)
290		return -EINVAL;
291
292	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
293	if (initial > iov->total ||
294	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
295		return -EIO;
296
297	if (nr_virtfn < 0 || nr_virtfn > iov->total ||
298	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
299		return -EINVAL;
300
301	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
302	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
303	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
304	if (!offset || (nr_virtfn > 1 && !stride))
305		return -EIO;
306
307	nres = 0;
308	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
309		res = dev->resource + PCI_IOV_RESOURCES + i;
310		if (res->parent)
311			nres++;
312	}
313	if (nres != iov->nres) {
314		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
315		return -ENOMEM;
316	}
317
318	iov->offset = offset;
319	iov->stride = stride;
320
321	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
322		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
323		return -ENOMEM;
324	}
325
326	if (iov->link != dev->devfn) {
327		pdev = pci_get_slot(dev->bus, iov->link);
328		if (!pdev)
329			return -ENODEV;
330
331		pci_dev_put(pdev);
332
333		if (!pdev->is_physfn)
334			return -ENODEV;
335
336		rc = sysfs_create_link(&dev->dev.kobj,
337					&pdev->dev.kobj, "dep_link");
338		if (rc)
339			return rc;
340	}
341
342	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
343	pci_block_user_cfg_access(dev);
344	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
345	msleep(100);
346	pci_unblock_user_cfg_access(dev);
347
348	iov->initial = initial;
349	if (nr_virtfn < initial)
350		initial = nr_virtfn;
351
352	for (i = 0; i < initial; i++) {
353		rc = virtfn_add(dev, i, 0);
354		if (rc)
355			goto failed;
356	}
357
358	if (iov->cap & PCI_SRIOV_CAP_VFM) {
359		rc = sriov_enable_migration(dev, nr_virtfn);
360		if (rc)
361			goto failed;
362	}
363
364	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
365	iov->nr_virtfn = nr_virtfn;
366
367	return 0;
368
369failed:
370	for (j = 0; j < i; j++)
371		virtfn_remove(dev, j, 0);
372
373	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
374	pci_block_user_cfg_access(dev);
375	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
376	ssleep(1);
377	pci_unblock_user_cfg_access(dev);
378
379	if (iov->link != dev->devfn)
380		sysfs_remove_link(&dev->dev.kobj, "dep_link");
381
382	return rc;
383}
384
385static void sriov_disable(struct pci_dev *dev)
386{
387	int i;
388	struct pci_sriov *iov = dev->sriov;
389
390	if (!iov->nr_virtfn)
391		return;
392
393	if (iov->cap & PCI_SRIOV_CAP_VFM)
394		sriov_disable_migration(dev);
395
396	for (i = 0; i < iov->nr_virtfn; i++)
397		virtfn_remove(dev, i, 0);
398
399	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
400	pci_block_user_cfg_access(dev);
401	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
402	ssleep(1);
403	pci_unblock_user_cfg_access(dev);
404
405	if (iov->link != dev->devfn)
406		sysfs_remove_link(&dev->dev.kobj, "dep_link");
407
408	iov->nr_virtfn = 0;
409}
410
411static int sriov_init(struct pci_dev *dev, int pos)
412{
413	int i;
414	int rc;
415	int nres;
416	u32 pgsz;
417	u16 ctrl, total, offset, stride;
418	struct pci_sriov *iov;
419	struct resource *res;
420	struct pci_dev *pdev;
421
422	if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
423	    dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
424		return -ENODEV;
425
426	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
427	if (ctrl & PCI_SRIOV_CTRL_VFE) {
428		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
429		ssleep(1);
430	}
431
432	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
433	if (!total)
434		return 0;
435
436	ctrl = 0;
437	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
438		if (pdev->is_physfn)
439			goto found;
440
441	pdev = NULL;
442	if (pci_ari_enabled(dev->bus))
443		ctrl |= PCI_SRIOV_CTRL_ARI;
444
445found:
446	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
447	pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
448	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
449	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
450	if (!offset || (total > 1 && !stride))
451		return -EIO;
452
453	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
454	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
455	pgsz &= ~((1 << i) - 1);
456	if (!pgsz)
457		return -EIO;
458
459	pgsz &= ~(pgsz - 1);
460	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
461
462	nres = 0;
463	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
464		res = dev->resource + PCI_IOV_RESOURCES + i;
465		i += __pci_read_base(dev, pci_bar_unknown, res,
466				     pos + PCI_SRIOV_BAR + i * 4);
467		if (!res->flags)
468			continue;
469		if (resource_size(res) & (PAGE_SIZE - 1)) {
470			rc = -EIO;
471			goto failed;
472		}
473		res->end = res->start + resource_size(res) * total - 1;
474		nres++;
475	}
476
477	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
478	if (!iov) {
479		rc = -ENOMEM;
480		goto failed;
481	}
482
483	iov->pos = pos;
484	iov->nres = nres;
485	iov->ctrl = ctrl;
486	iov->total = total;
487	iov->offset = offset;
488	iov->stride = stride;
489	iov->pgsz = pgsz;
490	iov->self = dev;
491	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
492	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
493	if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
494		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
495
496	if (pdev)
497		iov->dev = pci_dev_get(pdev);
498	else
499		iov->dev = dev;
500
501	mutex_init(&iov->lock);
502
503	dev->sriov = iov;
504	dev->is_physfn = 1;
505
506	return 0;
507
508failed:
509	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
510		res = dev->resource + PCI_IOV_RESOURCES + i;
511		res->flags = 0;
512	}
513
514	return rc;
515}
516
517static void sriov_release(struct pci_dev *dev)
518{
519	BUG_ON(dev->sriov->nr_virtfn);
520
521	if (dev != dev->sriov->dev)
522		pci_dev_put(dev->sriov->dev);
523
524	mutex_destroy(&dev->sriov->lock);
525
526	kfree(dev->sriov);
527	dev->sriov = NULL;
528}
529
530static void sriov_restore_state(struct pci_dev *dev)
531{
532	int i;
533	u16 ctrl;
534	struct pci_sriov *iov = dev->sriov;
535
536	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
537	if (ctrl & PCI_SRIOV_CTRL_VFE)
538		return;
539
540	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
541		pci_update_resource(dev, i);
542
543	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
544	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
545	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
546	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
547		msleep(100);
548}
549
550/**
551 * pci_iov_init - initialize the IOV capability
552 * @dev: the PCI device
553 *
554 * Returns 0 on success, or negative on failure.
555 */
556int pci_iov_init(struct pci_dev *dev)
557{
558	int pos;
559
560	if (!pci_is_pcie(dev))
561		return -ENODEV;
562
563	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
564	if (pos)
565		return sriov_init(dev, pos);
566
567	return -ENODEV;
568}
569
570/**
571 * pci_iov_release - release resources used by the IOV capability
572 * @dev: the PCI device
573 */
574void pci_iov_release(struct pci_dev *dev)
575{
576	if (dev->is_physfn)
577		sriov_release(dev);
578}
579
580/**
581 * pci_iov_resource_bar - get position of the SR-IOV BAR
582 * @dev: the PCI device
583 * @resno: the resource number
584 * @type: the BAR type to be filled in
585 *
586 * Returns position of the BAR encapsulated in the SR-IOV capability.
587 */
588int pci_iov_resource_bar(struct pci_dev *dev, int resno,
589			 enum pci_bar_type *type)
590{
591	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
592		return 0;
593
594	BUG_ON(!dev->is_physfn);
595
596	*type = pci_bar_unknown;
597
598	return dev->sriov->pos + PCI_SRIOV_BAR +
599		4 * (resno - PCI_IOV_RESOURCES);
600}
601
602/**
603 * pci_sriov_resource_alignment - get resource alignment for VF BAR
604 * @dev: the PCI device
605 * @resno: the resource number
606 *
607 * Returns the alignment of the VF BAR found in the SR-IOV capability.
608 * This is not the same as the resource size which is defined as
609 * the VF BAR size multiplied by the number of VFs.  The alignment
610 * is just the VF BAR size.
611 */
612resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
613{
614	struct resource tmp;
615	enum pci_bar_type type;
616	int reg = pci_iov_resource_bar(dev, resno, &type);
617	
618	if (!reg)
619		return 0;
620
621	 __pci_read_base(dev, type, &tmp, reg);
622	return resource_alignment(&tmp);
623}
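/*
 * Example with hypothetical numbers: a 16 KB VF BAR with TotalVFs = 8
 * makes the PF's IOV resource 8 * 16 KB = 128 KB in size, but the
 * alignment returned here is only the single VF BAR size, 16 KB.
 */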
624
625/**
626 * pci_restore_iov_state - restore the state of the IOV capability
627 * @dev: the PCI device
628 */
629void pci_restore_iov_state(struct pci_dev *dev)
630{
631	if (dev->is_physfn)
632		sriov_restore_state(dev);
633}
634
635/**
636 * pci_iov_bus_range - find bus range used by Virtual Function
637 * @bus: the PCI bus
638 *
639 * Returns the max number of buses (excluding the current one) used by
640 * Virtual Functions.
641 */
642int pci_iov_bus_range(struct pci_bus *bus)
643{
644	int max = 0;
645	u8 busnr;
646	struct pci_dev *dev;
647
648	list_for_each_entry(dev, &bus->devices, bus_list) {
649		if (!dev->is_physfn)
650			continue;
651		busnr = virtfn_bus(dev, dev->sriov->total - 1);
652		if (busnr > max)
653			max = busnr;
654	}
655
656	return max ? max - bus->number : 0;
657}
658
659/**
660 * pci_enable_sriov - enable the SR-IOV capability
661 * @dev: the PCI device
662 * @nr_virtfn: number of virtual functions to enable
663 *
664 * Returns 0 on success, or negative on failure.
665 */
666int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
667{
668	might_sleep();
669
670	if (!dev->is_physfn)
671		return -ENODEV;
672
673	return sriov_enable(dev, nr_virtfn);
674}
675EXPORT_SYMBOL_GPL(pci_enable_sriov);
676
677/**
678 * pci_disable_sriov - disable the SR-IOV capability
679 * @dev: the PCI device
680 */
681void pci_disable_sriov(struct pci_dev *dev)
682{
683	might_sleep();
684
685	if (!dev->is_physfn)
686		return;
687
688	sriov_disable(dev);
689}
690EXPORT_SYMBOL_GPL(pci_disable_sriov);
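/*
 * Usage sketch (not from this file; driver and names are hypothetical):
 * a PF driver enables SR-IOV once the device is ready to back its VFs
 * and disables it again before the PF itself is torn down.
 */
static int my_pf_enable_vfs(struct pci_dev *pdev, int nr_vfs)
{
	int rc;

	rc = pci_enable_sriov(pdev, nr_vfs);
	if (rc)
		dev_err(&pdev->dev, "failed to enable %d VFs: %d\n",
			nr_vfs, rc);
	return rc;
}

static void my_pf_disable_vfs(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);
}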
691
692/**
693 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
694 * @dev: the PCI device
695 *
696 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
697 *
698 * The Physical Function driver is responsible for registering an IRQ
699 * handler using the VF Migration Interrupt Message Number and for calling
700 * this function when the hardware generates the interrupt.
701 */
702irqreturn_t pci_sriov_migration(struct pci_dev *dev)
703{
704	if (!dev->is_physfn)
705		return IRQ_NONE;
706
707	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
708}
709EXPORT_SYMBOL_GPL(pci_sriov_migration);
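/*
 * Usage sketch (hypothetical PF driver): the PF driver owns the IRQ
 * matching the VF Migration Interrupt Message Number and simply
 * forwards it here so the SR-IOV core can schedule the migration work.
 */
static irqreturn_t my_pf_vf_migration_irq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	return pci_sriov_migration(pdev);
}
/*
 * Registered e.g. with request_irq(irq, my_pf_vf_migration_irq, 0,
 * "my_pf_vfm", pdev) after pci_enable_sriov() has succeeded.
 */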
710
711/**
712 * pci_num_vf - return number of VFs associated with a PF
713 * @dev: the PCI device
714 *
715 * Returns number of VFs, or 0 if SR-IOV is not enabled.
716 */
717int pci_num_vf(struct pci_dev *dev)
718{
719	if (!dev || !dev->is_physfn)
720		return 0;
721	else
722		return dev->sriov->nr_virtfn;
723}
724EXPORT_SYMBOL_GPL(pci_num_vf);
725
726static int ats_alloc_one(struct pci_dev *dev, int ps)
727{
728	int pos;
729	u16 cap;
730	struct pci_ats *ats;
731
732	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
733	if (!pos)
734		return -ENODEV;
735
736	ats = kzalloc(sizeof(*ats), GFP_KERNEL);
737	if (!ats)
738		return -ENOMEM;
739
740	ats->pos = pos;
741	ats->stu = ps;
742	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
743	ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
744					    PCI_ATS_MAX_QDEP;
745	dev->ats = ats;
746
747	return 0;
748}
749
750static void ats_free_one(struct pci_dev *dev)
751{
752	kfree(dev->ats);
753	dev->ats = NULL;
754}
755
756/**
757 * pci_enable_ats - enable the ATS capability
758 * @dev: the PCI device
759 * @ps: the IOMMU page shift
760 *
761 * Returns 0 on success, or negative on failure.
762 */
763int pci_enable_ats(struct pci_dev *dev, int ps)
764{
765	int rc;
766	u16 ctrl;
767
768	BUG_ON(dev->ats && dev->ats->is_enabled);
769
770	if (ps < PCI_ATS_MIN_STU)
771		return -EINVAL;
772
773	if (dev->is_physfn || dev->is_virtfn) {
774		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
775
776		mutex_lock(&pdev->sriov->lock);
777		if (pdev->ats)
778			rc = pdev->ats->stu == ps ? 0 : -EINVAL;
779		else
780			rc = ats_alloc_one(pdev, ps);
781
782		if (!rc)
783			pdev->ats->ref_cnt++;
784		mutex_unlock(&pdev->sriov->lock);
785		if (rc)
786			return rc;
787	}
788
789	if (!dev->is_physfn) {
790		rc = ats_alloc_one(dev, ps);
791		if (rc)
792			return rc;
793	}
794
795	ctrl = PCI_ATS_CTRL_ENABLE;
796	if (!dev->is_virtfn)
797		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
798	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
799
800	dev->ats->is_enabled = 1;
801
802	return 0;
803}
804
805/**
806 * pci_disable_ats - disable the ATS capability
807 * @dev: the PCI device
808 */
809void pci_disable_ats(struct pci_dev *dev)
810{
811	u16 ctrl;
812
813	BUG_ON(!dev->ats || !dev->ats->is_enabled);
814
815	pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
816	ctrl &= ~PCI_ATS_CTRL_ENABLE;
817	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
818
819	dev->ats->is_enabled = 0;
820
821	if (dev->is_physfn || dev->is_virtfn) {
822		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
823
824		mutex_lock(&pdev->sriov->lock);
825		pdev->ats->ref_cnt--;
826		if (!pdev->ats->ref_cnt)
827			ats_free_one(pdev);
828		mutex_unlock(&pdev->sriov->lock);
829	}
830
831	if (!dev->is_physfn)
832		ats_free_one(dev);
833}
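/*
 * Usage sketch (hypothetical caller, typically an IOMMU driver): ATS
 * is enabled with the IOMMU's page shift and disabled again when the
 * device is detached from its IOMMU domain.
 */
static int my_iommu_attach_ats(struct pci_dev *pdev)
{
	return pci_enable_ats(pdev, PAGE_SHIFT);
}

static void my_iommu_detach_ats(struct pci_dev *pdev)
{
	/* only valid if the attach path enabled ATS on this device */
	pci_disable_ats(pdev);
}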
834
835/**
836 * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
837 * @dev: the PCI device
838 *
839 * Returns the queue depth on success, or negative on failure.
840 *
841 * The ATS spec uses 0 in the Invalidate Queue Depth field to
842 * indicate that the function can accept 32 Invalidate Requests.
843 * But here we use the `real' values (i.e. 1~32) for the Queue
844 * Depth; and 0 indicates the function shares the Queue with
845 * other functions (doesn't exclusively own a Queue).
846 */
847int pci_ats_queue_depth(struct pci_dev *dev)
848{
849	int pos;
850	u16 cap;
851
852	if (dev->is_virtfn)
853		return 0;
854
855	if (dev->ats)
856		return dev->ats->qdep;
857
858	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
859	if (!pos)
860		return -ENODEV;
861
862	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
863
864	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
865				       PCI_ATS_MAX_QDEP;
866}
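/*
 * Example (hypothetical IOMMU code) of sizing the invalidation budget
 * for a device:
 *
 *	int qdep = pci_ats_queue_depth(pdev);
 *
 * A value of 1..32 is the function's own queue depth; 0 means the
 * function shares an Invalidate Queue with other functions.
 */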
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI Express I/O Virtualization (IOV) support
   4 *   Single Root IOV 1.0
   5 *   Address Translation Service 1.0
   6 *
   7 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
   8 */
   9
  10#include <linux/pci.h>
  11#include <linux/slab.h>
  12#include <linux/export.h>
  13#include <linux/string.h>
  14#include <linux/delay.h>
  15#include "pci.h"
  16
  17#define VIRTFN_ID_LEN	16
  18
  19int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
  20{
  21	if (!dev->is_physfn)
  22		return -EINVAL;
  23	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
  24				    dev->sriov->stride * vf_id) >> 8);
  25}
  26
  27int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
  28{
  29	if (!dev->is_physfn)
  30		return -EINVAL;
  31	return (dev->devfn + dev->sriov->offset +
  32		dev->sriov->stride * vf_id) & 0xff;
  33}
  34EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);
  35
  36/*
  37 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
  38 * change when NumVFs changes.
  39 *
  40 * Update iov->offset and iov->stride when NumVFs is written.
  41 */
  42static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
  43{
  44	struct pci_sriov *iov = dev->sriov;
  45
  46	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
  47	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
  48	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
  49}
  50
  51/*
  52 * The PF consumes one bus number.  NumVFs, First VF Offset, and VF Stride
  53 * determine how many additional bus numbers will be consumed by VFs.
  54 *
  55 * Iterate over all valid NumVFs, validate offset and stride, and calculate
  56 * the maximum number of bus numbers that could ever be required.
  57 */
  58static int compute_max_vf_buses(struct pci_dev *dev)
  59{
  60	struct pci_sriov *iov = dev->sriov;
  61	int nr_virtfn, busnr, rc = 0;
  62
  63	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
  64		pci_iov_set_numvfs(dev, nr_virtfn);
  65		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
  66			rc = -EIO;
  67			goto out;
  68		}
  69
  70		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
  71		if (busnr > iov->max_VF_buses)
  72			iov->max_VF_buses = busnr;
  73	}
  74
  75out:
  76	pci_iov_set_numvfs(dev, 0);
  77	return rc;
  78}
  79
  80static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
  81{
  82	struct pci_bus *child;
  83
  84	if (bus->number == busnr)
  85		return bus;
  86
  87	child = pci_find_bus(pci_domain_nr(bus), busnr);
  88	if (child)
  89		return child;
  90
  91	child = pci_add_new_bus(bus, NULL, busnr);
  92	if (!child)
  93		return NULL;
  94
  95	pci_bus_insert_busn_res(child, busnr, busnr);
  96
  97	return child;
  98}
  99
 100static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
 101{
 102	if (physbus != virtbus && list_empty(&virtbus->devices))
 103		pci_remove_bus(virtbus);
 104}
 105
 106resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
 107{
 108	if (!dev->is_physfn)
 109		return 0;
 110
 111	return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
 112}
 113
 114static void pci_read_vf_config_common(struct pci_dev *virtfn)
 115{
 116	struct pci_dev *physfn = virtfn->physfn;
 117
 118	/*
 119	 * Some config registers are the same across all associated VFs.
 120	 * Read them once from VF0 so we can skip reading them from the
 121	 * other VFs.
 122	 *
 123	 * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
 124	 * have the same Revision ID and Subsystem ID, but we assume they
 125	 * do.
 126	 */
 127	pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
 128			      &physfn->sriov->class);
 129	pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
 130			     &physfn->sriov->hdr_type);
 131	pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
 132			     &physfn->sriov->subsystem_vendor);
 133	pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
 134			     &physfn->sriov->subsystem_device);
 135}
 136
 137int pci_iov_sysfs_link(struct pci_dev *dev,
 138		struct pci_dev *virtfn, int id)
 139{
 140	char buf[VIRTFN_ID_LEN];
 141	int rc;
 142
 143	sprintf(buf, "virtfn%u", id);
 144	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
 145	if (rc)
 146		goto failed;
 147	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
 148	if (rc)
 149		goto failed1;
 150
 151	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
 152
 153	return 0;
 154
 155failed1:
 156	sysfs_remove_link(&dev->dev.kobj, buf);
 157failed:
 158	return rc;
 159}
 160
 161#ifdef CONFIG_PCI_MSI
 162static ssize_t sriov_vf_total_msix_show(struct device *dev,
 163					struct device_attribute *attr,
 164					char *buf)
 165{
 166	struct pci_dev *pdev = to_pci_dev(dev);
 167	u32 vf_total_msix = 0;
 168
 169	device_lock(dev);
 170	if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
 171		goto unlock;
 172
 173	vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
 174unlock:
 175	device_unlock(dev);
 176	return sysfs_emit(buf, "%u\n", vf_total_msix);
 177}
 178static DEVICE_ATTR_RO(sriov_vf_total_msix);
 179
 180static ssize_t sriov_vf_msix_count_store(struct device *dev,
 181					 struct device_attribute *attr,
 182					 const char *buf, size_t count)
 183{
 184	struct pci_dev *vf_dev = to_pci_dev(dev);
 185	struct pci_dev *pdev = pci_physfn(vf_dev);
 186	int val, ret;
 187
 188	ret = kstrtoint(buf, 0, &val);
 189	if (ret)
 190		return ret;
 191
 192	if (val < 0)
 193		return -EINVAL;
 194
 195	device_lock(&pdev->dev);
 196	if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
 197		ret = -EOPNOTSUPP;
 198		goto err_pdev;
 199	}
 200
 201	device_lock(&vf_dev->dev);
 202	if (vf_dev->driver) {
 203		/*
 204		 * A driver is already attached to this VF and has configured
 205		 * itself based on the current MSI-X vector count. Changing
 206		 * the vector size could mess up the driver, so block it.
 207		 */
 208		ret = -EBUSY;
 209		goto err_dev;
 210	}
 211
 212	ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);
 213
 214err_dev:
 215	device_unlock(&vf_dev->dev);
 216err_pdev:
 217	device_unlock(&pdev->dev);
 218	return ret ? : count;
 219}
 220static DEVICE_ATTR_WO(sriov_vf_msix_count);
 221#endif
 222
 223static struct attribute *sriov_vf_dev_attrs[] = {
 224#ifdef CONFIG_PCI_MSI
 225	&dev_attr_sriov_vf_msix_count.attr,
 226#endif
 227	NULL,
 228};
 229
 230static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
 231					  struct attribute *a, int n)
 232{
 233	struct device *dev = kobj_to_dev(kobj);
 234	struct pci_dev *pdev = to_pci_dev(dev);
 235
 236	if (!pdev->is_virtfn)
 237		return 0;
 238
 239	return a->mode;
 240}
 241
 242const struct attribute_group sriov_vf_dev_attr_group = {
 243	.attrs = sriov_vf_dev_attrs,
 244	.is_visible = sriov_vf_attrs_are_visible,
 245};
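/*
 * PF-driver side sketch (hypothetical driver; the two callbacks are
 * the struct pci_driver hooks these sysfs files rely on -- without
 * them sriov_vf_total_msix reads 0 and sriov_vf_msix_count writes
 * fail with -EOPNOTSUPP).
 */
static u32 my_pf_get_vf_total_msix(struct pci_dev *pf)
{
	return 256;	/* size of the device's shared MSI-X pool (made up) */
}

static int my_pf_set_msix_vec_count(struct pci_dev *vf, int count)
{
	/* tell device firmware to grant this (driverless) VF 'count' vectors */
	return 0;
}

static struct pci_driver my_pf_driver = {
	/* .name, .id_table, .probe, ... */
	.sriov_get_vf_total_msix  = my_pf_get_vf_total_msix,
	.sriov_set_msix_vec_count = my_pf_set_msix_vec_count,
};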
 246
 247int pci_iov_add_virtfn(struct pci_dev *dev, int id)
 248{
 249	int i;
 250	int rc = -ENOMEM;
 251	u64 size;
 252	struct pci_dev *virtfn;
 253	struct resource *res;
 254	struct pci_sriov *iov = dev->sriov;
 255	struct pci_bus *bus;
 256
 257	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
 258	if (!bus)
 259		goto failed;
 260
 261	virtfn = pci_alloc_dev(bus);
 262	if (!virtfn)
 263		goto failed0;
 264
 265	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
 266	virtfn->vendor = dev->vendor;
 267	virtfn->device = iov->vf_device;
 268	virtfn->is_virtfn = 1;
 269	virtfn->physfn = pci_dev_get(dev);
 270	virtfn->no_command_memory = 1;
 271
 272	if (id == 0)
 273		pci_read_vf_config_common(virtfn);
 274
 275	rc = pci_setup_device(virtfn);
 276	if (rc)
 277		goto failed1;
 278
 279	virtfn->dev.parent = dev->dev.parent;
 280	virtfn->multifunction = 0;
 281
 282	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 283		res = &dev->resource[i + PCI_IOV_RESOURCES];
 284		if (!res->parent)
 285			continue;
 286		virtfn->resource[i].name = pci_name(virtfn);
 287		virtfn->resource[i].flags = res->flags;
 288		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
 289		virtfn->resource[i].start = res->start + size * id;
 290		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
 291		rc = request_resource(res, &virtfn->resource[i]);
 292		BUG_ON(rc);
 293	}
 294
 295	pci_device_add(virtfn, virtfn->bus);
 296	rc = pci_iov_sysfs_link(dev, virtfn, id);
 297	if (rc)
 298		goto failed1;
 299
 300	pci_bus_add_device(virtfn);
 301
 302	return 0;
 303
 304failed1:
 305	pci_stop_and_remove_bus_device(virtfn);
 306	pci_dev_put(dev);
 307failed0:
 308	virtfn_remove_bus(dev->bus, bus);
 309failed:
 310
 311	return rc;
 312}
 313
 314void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
 315{
 316	char buf[VIRTFN_ID_LEN];
 317	struct pci_dev *virtfn;
 318
 319	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
 320					     pci_iov_virtfn_bus(dev, id),
 321					     pci_iov_virtfn_devfn(dev, id));
 322	if (!virtfn)
 323		return;
 324
 325	sprintf(buf, "virtfn%u", id);
 326	sysfs_remove_link(&dev->dev.kobj, buf);
 327	/*
 328	 * pci_stop_dev() could have been called for this virtfn already,
 329	 * so the directory for the virtfn may have been removed before.
 330	 * Double check to avoid spurious sysfs warnings.
 331	 */
 332	if (virtfn->dev.kobj.sd)
 333		sysfs_remove_link(&virtfn->dev.kobj, "physfn");
 334
 335	pci_stop_and_remove_bus_device(virtfn);
 336	virtfn_remove_bus(dev->bus, virtfn->bus);
 337
 338	/* balance pci_get_domain_bus_and_slot() */
 339	pci_dev_put(virtfn);
 340	pci_dev_put(dev);
 341}
 342
 343static ssize_t sriov_totalvfs_show(struct device *dev,
 344				   struct device_attribute *attr,
 345				   char *buf)
 346{
 347	struct pci_dev *pdev = to_pci_dev(dev);
 348
 349	return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
 350}
 351
 352static ssize_t sriov_numvfs_show(struct device *dev,
 353				 struct device_attribute *attr,
 354				 char *buf)
 355{
 356	struct pci_dev *pdev = to_pci_dev(dev);
 357	u16 num_vfs;
 358
 359	/* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
 360	device_lock(&pdev->dev);
 361	num_vfs = pdev->sriov->num_VFs;
 362	device_unlock(&pdev->dev);
 363
 364	return sysfs_emit(buf, "%u\n", num_vfs);
 365}
 366
 367/*
 368 * num_vfs > 0; number of VFs to enable
 369 * num_vfs = 0; disable all VFs
 370 *
 371 * Note: SRIOV spec does not allow partial VF
 372 *	 disable, so it's all or none.
 373 */
 374static ssize_t sriov_numvfs_store(struct device *dev,
 375				  struct device_attribute *attr,
 376				  const char *buf, size_t count)
 377{
 378	struct pci_dev *pdev = to_pci_dev(dev);
 379	int ret;
 380	u16 num_vfs;
 381
 382	ret = kstrtou16(buf, 0, &num_vfs);
 383	if (ret < 0)
 384		return ret;
 385
 386	if (num_vfs > pci_sriov_get_totalvfs(pdev))
 387		return -ERANGE;
 388
 389	device_lock(&pdev->dev);
 390
 391	if (num_vfs == pdev->sriov->num_VFs)
 392		goto exit;
 393
 394	/* is PF driver loaded */
 395	if (!pdev->driver) {
 396		pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
 397		ret = -ENOENT;
 398		goto exit;
 399	}
 400
 401	/* is PF driver loaded w/callback */
 402	if (!pdev->driver->sriov_configure) {
 403		pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
 404		ret = -ENOENT;
 405		goto exit;
 406	}
 407
 408	if (num_vfs == 0) {
 409		/* disable VFs */
 410		ret = pdev->driver->sriov_configure(pdev, 0);
 411		goto exit;
 412	}
 413
 414	/* enable VFs */
 415	if (pdev->sriov->num_VFs) {
 416		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
 417			 pdev->sriov->num_VFs, num_vfs);
 418		ret = -EBUSY;
 419		goto exit;
 420	}
 421
 422	ret = pdev->driver->sriov_configure(pdev, num_vfs);
 423	if (ret < 0)
 424		goto exit;
 425
 426	if (ret != num_vfs)
 427		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
 428			 num_vfs, ret);
 429
 430exit:
 431	device_unlock(&pdev->dev);
 432
 433	if (ret < 0)
 434		return ret;
 435
 436	return count;
 437}
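/*
 * In practice this store is driven from user space: writing "4" to
 * /sys/bus/pci/devices/<PF>/sriov_numvfs ends up in
 * pdev->driver->sriov_configure(pdev, 4), and writing "0" disables all
 * VFs again (partial disable is not allowed, per the note above).
 */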
 438
 439static ssize_t sriov_offset_show(struct device *dev,
 440				 struct device_attribute *attr,
 441				 char *buf)
 442{
 443	struct pci_dev *pdev = to_pci_dev(dev);
 444
 445	return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
 446}
 447
 448static ssize_t sriov_stride_show(struct device *dev,
 449				 struct device_attribute *attr,
 450				 char *buf)
 451{
 452	struct pci_dev *pdev = to_pci_dev(dev);
 453
 454	return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
 455}
 456
 457static ssize_t sriov_vf_device_show(struct device *dev,
 458				    struct device_attribute *attr,
 459				    char *buf)
 460{
 461	struct pci_dev *pdev = to_pci_dev(dev);
 462
 463	return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
 464}
 465
 466static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
 467					    struct device_attribute *attr,
 468					    char *buf)
 469{
 470	struct pci_dev *pdev = to_pci_dev(dev);
 471
 472	return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
 473}
 474
 475static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
 476					     struct device_attribute *attr,
 477					     const char *buf, size_t count)
 478{
 479	struct pci_dev *pdev = to_pci_dev(dev);
 480	bool drivers_autoprobe;
 481
 482	if (kstrtobool(buf, &drivers_autoprobe) < 0)
 483		return -EINVAL;
 484
 485	pdev->sriov->drivers_autoprobe = drivers_autoprobe;
 486
 487	return count;
 488}
 489
 490static DEVICE_ATTR_RO(sriov_totalvfs);
 491static DEVICE_ATTR_RW(sriov_numvfs);
 492static DEVICE_ATTR_RO(sriov_offset);
 493static DEVICE_ATTR_RO(sriov_stride);
 494static DEVICE_ATTR_RO(sriov_vf_device);
 495static DEVICE_ATTR_RW(sriov_drivers_autoprobe);
 496
 497static struct attribute *sriov_pf_dev_attrs[] = {
 498	&dev_attr_sriov_totalvfs.attr,
 499	&dev_attr_sriov_numvfs.attr,
 500	&dev_attr_sriov_offset.attr,
 501	&dev_attr_sriov_stride.attr,
 502	&dev_attr_sriov_vf_device.attr,
 503	&dev_attr_sriov_drivers_autoprobe.attr,
 504#ifdef CONFIG_PCI_MSI
 505	&dev_attr_sriov_vf_total_msix.attr,
 506#endif
 507	NULL,
 508};
 509
 510static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
 511					  struct attribute *a, int n)
 512{
 513	struct device *dev = kobj_to_dev(kobj);
 514
 515	if (!dev_is_pf(dev))
 516		return 0;
 517
 518	return a->mode;
 519}
 520
 521const struct attribute_group sriov_pf_dev_attr_group = {
 522	.attrs = sriov_pf_dev_attrs,
 523	.is_visible = sriov_pf_attrs_are_visible,
 524};
 525
 526int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 527{
 528	return 0;
 529}
 530
 531int __weak pcibios_sriov_disable(struct pci_dev *pdev)
 532{
 533	return 0;
 534}
 535
 536static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
 537{
 538	unsigned int i;
 539	int rc;
 540
 541	if (dev->no_vf_scan)
 542		return 0;
 543
 544	for (i = 0; i < num_vfs; i++) {
 545		rc = pci_iov_add_virtfn(dev, i);
 546		if (rc)
 547			goto failed;
 548	}
 549	return 0;
 550failed:
 551	while (i--)
 552		pci_iov_remove_virtfn(dev, i);
 553
 554	return rc;
 555}
 556
 557static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 558{
 559	int rc;
 560	int i;
 561	int nres;
 562	u16 initial;
 563	struct resource *res;
 564	struct pci_dev *pdev;
 565	struct pci_sriov *iov = dev->sriov;
 566	int bars = 0;
 567	int bus;
 568
 569	if (!nr_virtfn)
 570		return 0;
 571
 572	if (iov->num_VFs)
 573		return -EINVAL;
 574
 575	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
 576	if (initial > iov->total_VFs ||
 577	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
 578		return -EIO;
 579
 580	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
 581	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
 582		return -EINVAL;
 583
 584	nres = 0;
 585	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 586		bars |= (1 << (i + PCI_IOV_RESOURCES));
 587		res = &dev->resource[i + PCI_IOV_RESOURCES];
 588		if (res->parent)
 589			nres++;
 590	}
 591	if (nres != iov->nres) {
 592		pci_err(dev, "not enough MMIO resources for SR-IOV\n");
 593		return -ENOMEM;
 594	}
 595
 596	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
 597	if (bus > dev->bus->busn_res.end) {
 598		pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
 599			nr_virtfn, bus, &dev->bus->busn_res);
 600		return -ENOMEM;
 601	}
 602
 603	if (pci_enable_resources(dev, bars)) {
 604		pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
 605		return -ENOMEM;
 606	}
 607
 608	if (iov->link != dev->devfn) {
 609		pdev = pci_get_slot(dev->bus, iov->link);
 610		if (!pdev)
 611			return -ENODEV;
 612
 613		if (!pdev->is_physfn) {
 614			pci_dev_put(pdev);
 615			return -ENOSYS;
 616		}
 617
 618		rc = sysfs_create_link(&dev->dev.kobj,
 619					&pdev->dev.kobj, "dep_link");
 620		pci_dev_put(pdev);
 621		if (rc)
 622			return rc;
 623	}
 624
 625	iov->initial_VFs = initial;
 626	if (nr_virtfn < initial)
 627		initial = nr_virtfn;
 628
 629	rc = pcibios_sriov_enable(dev, initial);
 630	if (rc) {
 631		pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
 632		goto err_pcibios;
 633	}
 634
 635	pci_iov_set_numvfs(dev, nr_virtfn);
 636	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
 637	pci_cfg_access_lock(dev);
 638	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
 639	msleep(100);
 640	pci_cfg_access_unlock(dev);
 641
 642	rc = sriov_add_vfs(dev, initial);
 643	if (rc)
 644		goto err_pcibios;
 645
 646	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
 647	iov->num_VFs = nr_virtfn;
 648
 649	return 0;
 650
 651err_pcibios:
 652	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
 653	pci_cfg_access_lock(dev);
 654	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
 655	ssleep(1);
 656	pci_cfg_access_unlock(dev);
 657
 658	pcibios_sriov_disable(dev);
 659
 660	if (iov->link != dev->devfn)
 661		sysfs_remove_link(&dev->dev.kobj, "dep_link");
 662
 663	pci_iov_set_numvfs(dev, 0);
 664	return rc;
 665}
 666
 667static void sriov_del_vfs(struct pci_dev *dev)
 668{
 669	struct pci_sriov *iov = dev->sriov;
 670	int i;
 671
 672	for (i = 0; i < iov->num_VFs; i++)
 673		pci_iov_remove_virtfn(dev, i);
 674}
 675
 676static void sriov_disable(struct pci_dev *dev)
 677{
 678	struct pci_sriov *iov = dev->sriov;
 679
 680	if (!iov->num_VFs)
 681		return;
 682
 683	sriov_del_vfs(dev);
 684	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
 685	pci_cfg_access_lock(dev);
 686	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
 687	ssleep(1);
 688	pci_cfg_access_unlock(dev);
 689
 690	pcibios_sriov_disable(dev);
 691
 692	if (iov->link != dev->devfn)
 693		sysfs_remove_link(&dev->dev.kobj, "dep_link");
 694
 695	iov->num_VFs = 0;
 696	pci_iov_set_numvfs(dev, 0);
 697}
 698
 699static int sriov_init(struct pci_dev *dev, int pos)
 700{
 701	int i, bar64;
 702	int rc;
 703	int nres;
 704	u32 pgsz;
 705	u16 ctrl, total;
 706	struct pci_sriov *iov;
 707	struct resource *res;
 708	struct pci_dev *pdev;
 709
 710	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
 711	if (ctrl & PCI_SRIOV_CTRL_VFE) {
 712		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
 713		ssleep(1);
 714	}
 715
 716	ctrl = 0;
 717	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
 718		if (pdev->is_physfn)
 719			goto found;
 720
 721	pdev = NULL;
 722	if (pci_ari_enabled(dev->bus))
 723		ctrl |= PCI_SRIOV_CTRL_ARI;
 724
 725found:
 726	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
 727
 728	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
 729	if (!total)
 730		return 0;
 731
 732	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
 733	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
 734	pgsz &= ~((1 << i) - 1);
 735	if (!pgsz)
 736		return -EIO;
 737
 738	pgsz &= ~(pgsz - 1);
 739	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
 740
 741	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
 742	if (!iov)
 743		return -ENOMEM;
 744
 745	nres = 0;
 746	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 747		res = &dev->resource[i + PCI_IOV_RESOURCES];
 748		/*
 749		 * If it is already FIXED, don't change it, something
 750		 * (perhaps EA or header fixups) wants it this way.
 751		 */
 752		if (res->flags & IORESOURCE_PCI_FIXED)
 753			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
 754		else
 755			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
 756						pos + PCI_SRIOV_BAR + i * 4);
 757		if (!res->flags)
 758			continue;
 759		if (resource_size(res) & (PAGE_SIZE - 1)) {
 760			rc = -EIO;
 761			goto failed;
 762		}
 763		iov->barsz[i] = resource_size(res);
 764		res->end = res->start + resource_size(res) * total - 1;
 765		pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
 766			 i, res, i, total);
 767		i += bar64;
 768		nres++;
 769	}
 770
 771	iov->pos = pos;
 772	iov->nres = nres;
 773	iov->ctrl = ctrl;
 774	iov->total_VFs = total;
 775	iov->driver_max_VFs = total;
 776	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
 777	iov->pgsz = pgsz;
 778	iov->self = dev;
 779	iov->drivers_autoprobe = true;
 780	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
 781	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
 782	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
 783		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
 784
 785	if (pdev)
 786		iov->dev = pci_dev_get(pdev);
 787	else
 788		iov->dev = dev;
 789
 790	dev->sriov = iov;
 791	dev->is_physfn = 1;
 792	rc = compute_max_vf_buses(dev);
 793	if (rc)
 794		goto fail_max_buses;
 795
 796	return 0;
 797
 798fail_max_buses:
 799	dev->sriov = NULL;
 800	dev->is_physfn = 0;
 801failed:
 802	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 803		res = &dev->resource[i + PCI_IOV_RESOURCES];
 804		res->flags = 0;
 805	}
 806
 807	kfree(iov);
 808	return rc;
 809}
 810
 811static void sriov_release(struct pci_dev *dev)
 812{
 813	BUG_ON(dev->sriov->num_VFs);
 814
 815	if (dev != dev->sriov->dev)
 816		pci_dev_put(dev->sriov->dev);
 817
 818	kfree(dev->sriov);
 819	dev->sriov = NULL;
 820}
 821
 822static void sriov_restore_state(struct pci_dev *dev)
 823{
 824	int i;
 825	u16 ctrl;
 826	struct pci_sriov *iov = dev->sriov;
 827
 828	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
 829	if (ctrl & PCI_SRIOV_CTRL_VFE)
 830		return;
 831
 832	/*
 833	 * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
 834	 * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
 835	 */
 836	ctrl &= ~PCI_SRIOV_CTRL_ARI;
 837	ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
 838	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
 839
 840	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
 841		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
 842
 843	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
 844	pci_iov_set_numvfs(dev, iov->num_VFs);
 845	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
 846	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
 847		msleep(100);
 848}
 849
 850/**
 851 * pci_iov_init - initialize the IOV capability
 852 * @dev: the PCI device
 853 *
 854 * Returns 0 on success, or negative on failure.
 855 */
 856int pci_iov_init(struct pci_dev *dev)
 857{
 858	int pos;
 859
 860	if (!pci_is_pcie(dev))
 861		return -ENODEV;
 862
 863	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
 864	if (pos)
 865		return sriov_init(dev, pos);
 866
 867	return -ENODEV;
 868}
 869
 870/**
 871 * pci_iov_release - release resources used by the IOV capability
 872 * @dev: the PCI device
 873 */
 874void pci_iov_release(struct pci_dev *dev)
 875{
 876	if (dev->is_physfn)
 877		sriov_release(dev);
 878}
 879
 880/**
 881 * pci_iov_remove - clean up SR-IOV state after PF driver is detached
 882 * @dev: the PCI device
 883 */
 884void pci_iov_remove(struct pci_dev *dev)
 885{
 886	struct pci_sriov *iov = dev->sriov;
 887
 888	if (!dev->is_physfn)
 889		return;
 890
 891	iov->driver_max_VFs = iov->total_VFs;
 892	if (iov->num_VFs)
 893		pci_warn(dev, "driver left SR-IOV enabled after remove\n");
 894}
 895
 896/**
 897 * pci_iov_update_resource - update a VF BAR
 898 * @dev: the PCI device
 899 * @resno: the resource number
 900 *
 901 * Update a VF BAR in the SR-IOV capability of a PF.
 902 */
 903void pci_iov_update_resource(struct pci_dev *dev, int resno)
 904{
 905	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
 906	struct resource *res = dev->resource + resno;
 907	int vf_bar = resno - PCI_IOV_RESOURCES;
 908	struct pci_bus_region region;
 909	u16 cmd;
 910	u32 new;
 911	int reg;
 912
 913	/*
 914	 * The generic pci_restore_bars() path calls this for all devices,
 915	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
 916	 * have nothing to do.
 917	 */
 918	if (!iov)
 919		return;
 920
 921	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
 922	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
 923		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
 924			 vf_bar, res);
 925		return;
 926	}
 927
 928	/*
 929	 * Ignore unimplemented BARs, unused resource slots for 64-bit
 930	 * BARs, and non-movable resources, e.g., those described via
 931	 * Enhanced Allocation.
 932	 */
 933	if (!res->flags)
 934		return;
 935
 936	if (res->flags & IORESOURCE_UNSET)
 937		return;
 938
 939	if (res->flags & IORESOURCE_PCI_FIXED)
 940		return;
 941
 942	pcibios_resource_to_bus(dev->bus, &region, res);
 943	new = region.start;
 944	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
 945
 946	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
 947	pci_write_config_dword(dev, reg, new);
 948	if (res->flags & IORESOURCE_MEM_64) {
 949		new = region.start >> 16 >> 16;
 950		pci_write_config_dword(dev, reg + 4, new);
 951	}
 952}
 953
 954resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
 955						      int resno)
 956{
 957	return pci_iov_resource_size(dev, resno);
 958}
 959
 960/**
 961 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 962 * @dev: the PCI device
 963 * @resno: the resource number
 964 *
 965 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 966 * This is not the same as the resource size which is defined as
 967 * the VF BAR size multiplied by the number of VFs.  The alignment
 968 * is just the VF BAR size.
 969 */
 970resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 971{
 972	return pcibios_iov_resource_alignment(dev, resno);
 973}
 974
 975/**
 976 * pci_restore_iov_state - restore the state of the IOV capability
 977 * @dev: the PCI device
 978 */
 979void pci_restore_iov_state(struct pci_dev *dev)
 980{
 981	if (dev->is_physfn)
 982		sriov_restore_state(dev);
 983}
 984
 985/**
 986 * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
 987 * @dev: the PCI device
 988 * @auto_probe: set VF drivers auto probe flag
 989 */
 990void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
 991{
 992	if (dev->is_physfn)
 993		dev->sriov->drivers_autoprobe = auto_probe;
 994}
 995
 996/**
 997 * pci_iov_bus_range - find bus range used by Virtual Function
 998 * @bus: the PCI bus
 999 *
1000 * Returns the max number of buses (excluding the current one) used by
1001 * Virtual Functions.
1002 */
1003int pci_iov_bus_range(struct pci_bus *bus)
1004{
1005	int max = 0;
1006	struct pci_dev *dev;
1007
1008	list_for_each_entry(dev, &bus->devices, bus_list) {
1009		if (!dev->is_physfn)
1010			continue;
1011		if (dev->sriov->max_VF_buses > max)
1012			max = dev->sriov->max_VF_buses;
1013	}
1014
1015	return max ? max - bus->number : 0;
1016}
1017
1018/**
1019 * pci_enable_sriov - enable the SR-IOV capability
1020 * @dev: the PCI device
1021 * @nr_virtfn: number of virtual functions to enable
1022 *
1023 * Returns 0 on success, or negative on failure.
1024 */
1025int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1026{
1027	might_sleep();
1028
1029	if (!dev->is_physfn)
1030		return -ENOSYS;
1031
1032	return sriov_enable(dev, nr_virtfn);
1033}
1034EXPORT_SYMBOL_GPL(pci_enable_sriov);
1035
1036/**
1037 * pci_disable_sriov - disable the SR-IOV capability
1038 * @dev: the PCI device
1039 */
1040void pci_disable_sriov(struct pci_dev *dev)
1041{
1042	might_sleep();
1043
1044	if (!dev->is_physfn)
1045		return;
1046
1047	sriov_disable(dev);
1048}
1049EXPORT_SYMBOL_GPL(pci_disable_sriov);
1050
1051/**
1052 * pci_num_vf - return number of VFs associated with a PF
1053 * @dev: the PCI device
1054 *
1055 * Returns number of VFs, or 0 if SR-IOV is not enabled.
1056 */
1057int pci_num_vf(struct pci_dev *dev)
1058{
1059	if (!dev->is_physfn)
1060		return 0;
1061
1062	return dev->sriov->num_VFs;
1063}
1064EXPORT_SYMBOL_GPL(pci_num_vf);
1065
1066/**
1067 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
1068 * @dev: the PCI device
1069 *
1070 * Returns number of VFs belonging to this device that are assigned to a guest.
1071 * If device is not a physical function returns 0.
1072 */
1073int pci_vfs_assigned(struct pci_dev *dev)
1074{
1075	struct pci_dev *vfdev;
1076	unsigned int vfs_assigned = 0;
1077	unsigned short dev_id;
1078
1079	/* only search if we are a PF */
1080	if (!dev->is_physfn)
1081		return 0;
1082
1083	/*
1084	 * determine the device ID for the VFs, the vendor ID will be the
1085	 * same as the PF so there is no need to check for that one
1086	 */
1087	dev_id = dev->sriov->vf_device;
1088
1089	/* loop through all the VFs to see if we own any that are assigned */
1090	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
1091	while (vfdev) {
1092		/*
1093		 * It is considered assigned if it is a virtual function with
1094		 * our dev as the physical function and the assigned bit is set
1095		 */
1096		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
1097			pci_is_dev_assigned(vfdev))
1098			vfs_assigned++;
1099
1100		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
1101	}
1102
1103	return vfs_assigned;
1104}
1105EXPORT_SYMBOL_GPL(pci_vfs_assigned);
1106
1107/**
1108 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
1109 * @dev: the PCI PF device
1110 * @numvfs: number that should be used for TotalVFs supported
1111 *
1112 * Should be called from PF driver's probe routine with
1113 * device's mutex held.
1114 *
1115 * Returns 0 if PF is an SR-IOV-capable device and the value of
1116 * numvfs is valid. If not a PF, return -ENOSYS;
1117 * if numvfs is invalid, return -EINVAL;
1118 * if VFs are already enabled, return -EBUSY.
1119 */
1120int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
1121{
1122	if (!dev->is_physfn)
1123		return -ENOSYS;
1124
1125	if (numvfs > dev->sriov->total_VFs)
1126		return -EINVAL;
1127
1128	/* Shouldn't change if VFs already enabled */
1129	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
1130		return -EBUSY;
1131
1132	dev->sriov->driver_max_VFs = numvfs;
1133	return 0;
1134}
1135EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
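/*
 * Sketch (hypothetical driver): a PF driver that can only back, say,
 * 8 VFs caps the advertised TotalVFs from its probe routine, so later
 * sriov_numvfs writes beyond that limit are rejected with -ERANGE.
 */
static int my_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... normal device setup ... */
	return pci_sriov_set_totalvfs(pdev, 8);
}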
1136
1137/**
1138 * pci_sriov_get_totalvfs -- get total VFs supported on this device
1139 * @dev: the PCI PF device
1140 *
1141 * For a PCIe device with SRIOV support, return the PCIe
1142 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
1143 * if the driver reduced it.  Otherwise 0.
1144 */
1145int pci_sriov_get_totalvfs(struct pci_dev *dev)
1146{
1147	if (!dev->is_physfn)
1148		return 0;
1149
1150	return dev->sriov->driver_max_VFs;
1151}
1152EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
1153
1154/**
1155 * pci_sriov_configure_simple - helper to configure SR-IOV
1156 * @dev: the PCI device
1157 * @nr_virtfn: number of virtual functions to enable, 0 to disable
1158 *
1159 * Enable or disable SR-IOV for devices that don't require any PF setup
1160 * before enabling SR-IOV.  Return value is negative on error, or number of
1161 * VFs allocated on success.
1162 */
1163int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
1164{
1165	int rc;
1166
1167	might_sleep();
1168
1169	if (!dev->is_physfn)
1170		return -ENODEV;
1171
1172	if (pci_vfs_assigned(dev)) {
1173		pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
1174		return -EPERM;
1175	}
1176
1177	if (nr_virtfn == 0) {
1178		sriov_disable(dev);
1179		return 0;
1180	}
1181
1182	rc = sriov_enable(dev, nr_virtfn);
1183	if (rc < 0)
1184		return rc;
1185
1186	return nr_virtfn;
1187}
1188EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
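/*
 * Sketch (hypothetical driver): a PF that needs no device-specific
 * work around VF enable/disable can plug this helper straight into its
 * pci_driver, which makes the sriov_numvfs sysfs file work unmodified.
 */
static struct pci_driver my_simple_pf_driver = {
	.name		 = "my_simple_pf",	/* hypothetical */
	/* .id_table, .probe, .remove, ... */
	.sriov_configure = pci_sriov_configure_simple,
};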