// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

/*
 * vp_modern_map_capability - map a part of virtio pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability
 * @minlen: minimal length of the capability
 * @align: align requirement
 * @start: start from the capability
 * @size: map size
 * @len: the length that is actually mapped
 * @pa: physical address of the capability
 *
 * Returns the io address for the part of the capability
 */
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
			 size_t minlen, u32 align, u32 start, u32 size,
			 size_t *len, resource_size_t *pa)
{
	struct pci_dev *dev = mdev->pci_dev;
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	/* Check if the BAR may have changed since we requested the region. */
	if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
		dev_err(&dev->dev,
			"virtio_pci: bar unexpectedly changed to %u\n", bar);
		return NULL;
	}

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	else if (pa)
		*pa = pci_resource_start(dev, bar) + offset;

	return p;
}

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the PCI device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar >= PCI_STD_NUM_BARS)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}

/* This is part of the ABI.  Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * vp_modern_probe: probe the modern virtio pci device
 * @mdev: the modern virtio-pci device
 *
 * Note that the caller is required to enable the PCI device before calling
 * this function.
 *
 * Returns 0 on success, a negative errno on failure
 */
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as the legacy driver always did.
		 */
		mdev->id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		mdev->id.device = pci_dev->device - 0x1040;
	}
	mdev->id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &mdev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);

	err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	mdev->common = vp_modern_map_capability(mdev, common,
				      sizeof(struct virtio_pci_common_cfg), 4,
				      0, sizeof(struct virtio_pci_common_cfg),
				      NULL, NULL);
	if (!mdev->common)
		goto err_map_common;
	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
					     0, 1,
					     NULL, NULL);
	if (!mdev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &mdev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		mdev->notify_base = vp_modern_map_capability(mdev, notify,
							     2, 2,
							     0, notify_length,
							     &mdev->notify_len,
							     &mdev->notify_pa);
		if (!mdev->notify_base)
			goto err_map_notify;
	} else {
		mdev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
							&mdev->device_len,
							NULL);
		if (!mdev->device)
			goto err_map_device;
	}

	return 0;

err_map_device:
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
	pci_iounmap(pci_dev, mdev->common);
err_map_common:
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
	return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
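
/*
 * Usage sketch (illustrative only, not part of this module): a hypothetical
 * caller named my_probe() would enable the PCI device first, as required
 * above, then hand a zero-initialized virtio_pci_modern_device to
 * vp_modern_probe(), undoing everything with pci_disable_device() and,
 * later, vp_modern_remove() on the error and teardown paths:
 *
 *	static int my_probe(struct pci_dev *pci_dev)
 *	{
 *		struct virtio_pci_modern_device *mdev;
 *		int err;
 *
 *		mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
 *		if (!mdev)
 *			return -ENOMEM;
 *		mdev->pci_dev = pci_dev;
 *
 *		err = pci_enable_device(pci_dev);
 *		if (err)
 *			goto err_free;
 *
 *		err = vp_modern_probe(mdev);
 *		if (err)
 *			goto err_disable;
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pci_dev);
 *	err_free:
 *		kfree(mdev);
 *		return err;
 *	}
 */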

/*
 * vp_modern_remove: remove and clean up the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;

	if (mdev->device)
		pci_iounmap(pci_dev, mdev->device);
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
	pci_iounmap(pci_dev, mdev->isr);
	pci_iounmap(pci_dev, mdev->common);
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);

/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features = vp_ioread32(&cfg->device_feature);
	vp_iowrite32(1, &cfg->device_feature_select);
	features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);

/*
 * vp_modern_get_driver_features - get driver features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the driver features read from the device
 */
u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->guest_feature_select);
	features = vp_ioread32(&cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);

/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features to set to device
 */
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
			    u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);
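
/*
 * Example of a minimal feature negotiation built from the three helpers
 * above (a hedged sketch; the single feature bit kept here is purely for
 * illustration, a real driver masks in every bit it supports):
 *
 *	u64 features = vp_modern_get_features(mdev);
 *
 *	features &= BIT_ULL(VIRTIO_F_VERSION_1);
 *	vp_modern_set_features(mdev, features);
 *
 * The written value can then be read back for verification:
 *
 *	WARN_ON(vp_modern_get_driver_features(mdev) != features);
 */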

/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from device
 */
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from device
 */
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);

/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status set to device
 */
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
			  u8 status)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
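
/*
 * The two status accessors above are enough to drive the status handshake
 * from the virtio specification (a sketch, with error handling elided; the
 * VIRTIO_CONFIG_S_* constants come from <uapi/linux/virtio_config.h>, and
 * writing 0 resets the device):
 *
 *	u8 status = 0;
 *
 *	vp_modern_set_status(mdev, 0);
 *	status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
 *	vp_modern_set_status(mdev, status);
 *	status |= VIRTIO_CONFIG_S_DRIVER;
 *	vp_modern_set_status(mdev, status);
 *
 * negotiate features here, then:
 *
 *	status |= VIRTIO_CONFIG_S_FEATURES_OK;
 *	vp_modern_set_status(mdev, status);
 *	if (!(vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK))
 *		return -ENODEV;
 *	status |= VIRTIO_CONFIG_S_DRIVER_OK;
 *	vp_modern_set_status(mdev, status);
 */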

/*
 * vp_modern_get_queue_reset - get the queue reset status
 * @mdev: the modern virtio-pci device
 * @index: queue index
 *
 * Returns the reset status read from the device
 */
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	return vp_ioread16(&cfg->queue_reset);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);

/*
 * vp_modern_set_queue_reset - reset the queue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	vp_iowrite16(1, &cfg->queue_reset);

	while (vp_ioread16(&cfg->queue_reset))
		msleep(1);

	while (vp_ioread16(&cfg->cfg.queue_enable))
		msleep(1);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
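
/*
 * Example (sketch): resetting virtqueue 0, assuming the device offered
 * VIRTIO_F_RING_RESET. vp_modern_set_queue_reset() already polls until
 * both queue_reset and queue_enable read back as zero, so the queue can
 * be reconfigured and re-enabled right afterwards:
 *
 *	vp_modern_set_queue_reset(mdev, 0);
 *	WARN_ON(vp_modern_get_queue_reset(mdev, 0));
 */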

/*
 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the MSIX vector for the virtqueue
 *
 * Returns the vector read back from the device
 */
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
			   u16 index, u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);
	/* Flush the write out to device */
	return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);

/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
			    u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &cfg->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);
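
/*
 * Both vector helpers read the register back, which lets the caller detect
 * a device that ran out of MSI-X resources. A hedged sketch (the vector
 * numbers here are arbitrary):
 *
 *	if (vp_modern_config_vector(mdev, 0) == VIRTIO_MSI_NO_VECTOR)
 *		return -EBUSY;
 *	if (vp_modern_queue_vector(mdev, 0, 1) == VIRTIO_MSI_NO_VECTOR)
 *		return -EBUSY;
 */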

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
			     u16 index, u64 desc_addr, u64 driver_addr,
			     u64 device_addr)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);

	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);

/*
 * vp_modern_set_queue_enable - enable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether the virtqueue should be enabled
 */
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);

/*
 * vp_modern_get_queue_enable - get the enable status of a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether a virtqueue is enabled or not
 */
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);

/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
			      u16 index, u16 size)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);

/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
			     u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
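
/*
 * Putting the virtqueue helpers together: a minimal setup sketch for queue
 * `index`, where desc_dma, driver_dma and device_dma are assumed to be DMA
 * addresses the caller already obtained, e.g. from dma_alloc_coherent():
 *
 *	u16 num = vp_modern_get_queue_size(mdev, index);
 *
 *	if (!num)
 *		return -ENOENT;
 *	vp_modern_set_queue_size(mdev, index, num);
 *	vp_modern_queue_address(mdev, index, desc_dma, driver_dma, device_dma);
 *	vp_modern_set_queue_enable(mdev, index, true);
 */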

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
	return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);

/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for a virtqueue
 */
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
					  u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_notify_off);
}

/*
 * vp_modern_map_vq_notify - map notification area for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @pa: the pointer to the physical address of the notify area
 *
 * Returns the address of the notification area
 */
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
				      u16 index, resource_size_t *pa)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	if (mdev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * mdev->notify_offset_multiplier + 2
			> mdev->notify_len) {
			dev_warn(&mdev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, mdev->notify_offset_multiplier,
				 index, mdev->notify_len);
			return NULL;
		}
		if (pa)
			*pa = mdev->notify_pa +
			      off * mdev->notify_offset_multiplier;
		return mdev->notify_base + off * mdev->notify_offset_multiplier;
	} else {
		return vp_modern_map_capability(mdev,
				       mdev->notify_map_cap, 2, 2,
				       off * mdev->notify_offset_multiplier, 2,
				       NULL, pa);
	}
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
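
/*
 * Example (sketch): kicking a virtqueue through the mapped notification
 * area. Per the virtio spec, a notification is a 16-bit write of the queue
 * index. Note that when notify_base was not pre-mapped in vp_modern_probe(),
 * the returned mapping comes from pci_iomap_range() and must eventually be
 * released with pci_iounmap():
 *
 *	void __iomem *addr = vp_modern_map_vq_notify(mdev, index, NULL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	vp_iowrite16(index, addr);
 */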

MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL");