Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Mediated virtual PCI serial host device driver
   4 *
   5 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
   6 *     Author: Neo Jia <cjia@nvidia.com>
   7 *             Kirti Wankhede <kwankhede@nvidia.com>
   8 *
 
 
 
 
   9 * Sample driver that creates mdev device that simulates serial port over PCI
  10 * card.
 
  11 */
  12
  13#include <linux/init.h>
  14#include <linux/module.h>
 
  15#include <linux/kernel.h>
  16#include <linux/fs.h>
  17#include <linux/poll.h>
  18#include <linux/slab.h>
  19#include <linux/cdev.h>
  20#include <linux/sched.h>
  21#include <linux/wait.h>
 
  22#include <linux/vfio.h>
  23#include <linux/iommu.h>
  24#include <linux/sysfs.h>
  25#include <linux/ctype.h>
  26#include <linux/file.h>
  27#include <linux/mdev.h>
  28#include <linux/pci.h>
  29#include <linux/serial.h>
  30#include <uapi/linux/serial_reg.h>
  31#include <linux/eventfd.h>
  32/*
  33 * #defines
  34 */
  35
  36#define VERSION_STRING  "0.1"
  37#define DRIVER_AUTHOR   "NVIDIA Corporation"
  38
  39#define MTTY_CLASS_NAME "mtty"
  40
  41#define MTTY_NAME       "mtty"
  42
  43#define MTTY_STRING_LEN		16
  44
  45#define MTTY_CONFIG_SPACE_SIZE  0xff
  46#define MTTY_IO_BAR_SIZE        0x8
  47#define MTTY_MMIO_BAR_SIZE      0x100000
  48
  49#define STORE_LE16(addr, val)   (*(u16 *)addr = val)
  50#define STORE_LE32(addr, val)   (*(u32 *)addr = val)
  51
  52#define MAX_FIFO_SIZE   16
  53
  54#define CIRCULAR_BUF_INC_IDX(idx)    (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
  55
  56#define MTTY_VFIO_PCI_OFFSET_SHIFT   40
  57
  58#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
  59#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
  60				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
  61#define MTTY_VFIO_PCI_OFFSET_MASK    \
  62				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
  63#define MAX_MTTYS	24
  64
  65/*
  66 * Global Structures
  67 */
  68
/*
 * Driver-global state shared by all mtty instances: the character
 * device bookkeeping plus the mdev parent handle the types register
 * under.
 */
static struct mtty_dev {
	dev_t		vd_devt;	/* char device number */
	struct class	*vd_class;	/* device class */
	struct cdev	vd_cdev;	/* character device */
	struct idr	vd_idr;		/* per-instance id allocator */
	struct device	dev;		/* parent device of all mdevs */
	struct mdev_parent parent;	/* mdev core registration handle */
} mtty_dev;

/* Geometry of one emulated PCI region (config space or a BAR) */
struct mdev_region_info {
	u64 start;		/* guest base address programmed into the BAR */
	u64 phys_start;		/* NOTE(review): not referenced in this file — confirm before use */
	u32 size;		/* region size in bytes (0 = unimplemented) */
	u64 vfio_offset;	/* offset encoding the region index (see macros above) */
};
  84
/*
 * Register-name tables used only for DEBUG_REGS trace output in
 * mdev_access(): the 16550 mnemonic for each BAR offset, one table for
 * the write path and one for the read path (several offsets decode
 * differently by direction, e.g. TX/RX and FCR/IIR).
 */
#if defined(DEBUG_REGS)
static const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

static const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif
 108
/* loop back buffer: data the guest transmits is what it later receives */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];	/* circular buffer (power-of-two size) */
	u8 head, tail;		/* producer / consumer indices */
	u8 count;		/* bytes currently queued */
};

/* Emulated 16550 state for one serial port */
struct serial_port {
	u8 uart_reg[8];         /* 8 registers */
	struct rxtx rxtx;       /* loop back buffer */
	bool dlab;              /* divisor latch access bit (set via LCR) */
	bool overrun;           /* set when a TX byte hit a full FIFO */
	u16 divisor;            /* baud divisor latched while DLAB is set */
	u8 fcr;                 /* FIFO control register */
	u8 max_fifo_size;       /* 1 with FIFOs disabled, else MAX_FIFO_SIZE */
	u8 intr_trigger_level;  /* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;	/* embedded vfio core device */
	int irq_fd;			/* fd backing the active eventfd */
	struct eventfd_ctx *intx_evtfd;	/* INTx signalling eventfd */
	struct eventfd_ctx *msi_evtfd;	/* MSI signalling eventfd */
	int irq_index;			/* active IRQ index, -1 when none */
	u8 *vconfig;			/* virtual PCI config space */
	struct mutex ops_lock;		/* serializes region accesses */
	struct mdev_device *mdev;	/* backing mdev device */
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];	/* size masks for BAR probing */
	struct list_head next;
	struct serial_port s[2];	/* one or two emulated ports */
	struct mutex rxtx_lock;		/* protects the loopback FIFOs */
	struct vfio_device_info dev_info;	/* cached VFIO_DEVICE_GET_INFO reply */
	int nr_ports;			/* ports owned by this instance (1 or 2) */
};
 145
/*
 * The two supported mdev types: single-port and dual-port serial
 * devices, distinguished by how many ports they take from the pool.
 */
static struct mtty_type {
	struct mdev_type type;
	int nr_ports;
} mtty_types[2] = {
	{ .nr_ports = 1, .type.sysfs_name = "1",
	  .type.pretty_name = "Single port serial" },
	{ .nr_ports = 2, .type.sysfs_name = "2",
	  .type.pretty_name = "Dual port serial" },
};

/* Type pointers, presumably registered with the mdev core at module init */
static struct mdev_type *mtty_mdev_types[] = {
	&mtty_types[0].type,
	&mtty_types[1].type,
};

/* Global pool of serial ports shared by all instances (see mtty_init_dev) */
static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);

/* Character device file operations: no handlers beyond module ownership */
static const struct file_operations vd_fops = {
	.owner          = THIS_MODULE,
};

static const struct vfio_device_ops mtty_dev_ops;

/* function prototypes */

static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
 172
 173/* Helper functions */
 
 
 
 174
 175static void dump_buffer(u8 *buf, uint32_t count)
 
 
 
 
 
 
 
 
 176{
 177#if defined(DEBUG)
 178	int i;
 179
 180	pr_info("Buffer:\n");
 181	for (i = 0; i < count; i++) {
 182		pr_info("%2x ", *(buf + i));
 183		if ((i + 1) % 16 == 0)
 184			pr_info("\n");
 185	}
 186#endif
 187}
 188
/*
 * Initialize the virtual PCI configuration space of a new instance:
 * IDs, class code, BARs and a vendor-specific data area.  BAR1 is only
 * exposed when the device was created with two ports.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID: vendor 0x4348, device 0x3253 (stored little endian) */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] =  0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] =  0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] =  0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] =  0x07;

	/* base address registers */
	/* BAR0: IO space; bar_mask holds the size mask for BAR probing */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space for the second serial port */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] =  0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] =  0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] =  0x23;
	mdev_state->vconfig[0x43] =  0x80;
	mdev_state->vconfig[0x44] =  0x23;
	mdev_state->vconfig[0x48] =  0x23;
	mdev_state->vconfig[0x4c] =  0x23;

	/* ASCII string "PCI Serial/UART" at offsets 0x60..0x6e */
	mdev_state->vconfig[0x60] =  0x50;
	mdev_state->vconfig[0x61] =  0x43;
	mdev_state->vconfig[0x62] =  0x49;
	mdev_state->vconfig[0x63] =  0x20;
	mdev_state->vconfig[0x64] =  0x53;
	mdev_state->vconfig[0x65] =  0x65;
	mdev_state->vconfig[0x66] =  0x72;
	mdev_state->vconfig[0x67] =  0x69;
	mdev_state->vconfig[0x68] =  0x61;
	mdev_state->vconfig[0x69] =  0x6c;
	mdev_state->vconfig[0x6a] =  0x2f;
	mdev_state->vconfig[0x6b] =  0x55;
	mdev_state->vconfig[0x6c] =  0x41;
	mdev_state->vconfig[0x6d] =  0x52;
	mdev_state->vconfig[0x6e] =  0x54;
}
 252
/*
 * Emulate a guest write to PCI configuration space.
 *
 * Writable fields are the interrupt line and the implemented BARs.
 * A sizing write of 0xffffffff to a BAR is answered with the size mask
 * so the guest can probe BAR sizes; BAR2-BAR4 always read back zero;
 * unhandled offsets are only logged.
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 u8 *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c:  /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10:  /* BAR0 */
	case 0x14:  /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		/* single-port devices have no BAR1: force it to zero */
		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/* sizing cycle: reflect the BAR size mask back */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* preserve the read-only low flag bits (I/O space + type) */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18:  /* BAR2 */
	case 0x1c:  /* BAR3 */
	case 0x20:  /* BAR4 */
		/* unimplemented BARs always read back as zero */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
 306
/*
 * Emulate a guest write to a UART register of port @index.
 *
 * The device loops transmitted data back to the receiver: bytes
 * written to THR land in the shared rxtx FIFO and may raise RDI (FIFO
 * trigger level reached) or RLSI (overrun) interrupts.  Writes to
 * IER/FCR/LCR/MCR update emulated register state and can trigger
 * THRI/modem-status interrupts.  FIFO state is protected by rxtx_lock.
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
				u16 offset, u8 *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			   (mdev_state->s[index].rxtx.count ==
				    mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
								UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/* THRI enabled while the FIFO is empty: signal now */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Receive data timeout in IIR register
		 * (note: this deliberately overrides the switch above)
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		/* DLAB selects divisor-latch access on TX/IER offsets */
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
 474
/*
 * Emulate a guest read from a UART register of port @index.
 *
 * RX pops from the loopback FIFO (what was written to TX); IIR
 * synthesizes the pending-interrupt reason in priority order; LSR/MSR
 * are derived from FIFO and MCR state.  FIFO state is protected by
 * rxtx_lock.  The result is stored through @buf.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf  = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
		/*
		 *  Trigger interrupt if tx buffer empty interrupt is
		 *  enabled and fifo is empty
		 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							 UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		/* only the low four interrupt-enable bits are defined */
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD  */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				 (UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						 UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
 612
/*
 * Recompute the guest-programmed base address of every implemented
 * region from the virtual config space.  A 64-bit memory BAR consumes
 * two consecutive dwords; everything else (32-bit, 1M, unknown) is
 * treated as a 32-bit BAR.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		/* regions with size 0 were never exposed: skip them */
		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			/* high dword lives in the next config slot */
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
 649
/*
 * Common backend for all read/write accesses from userspace.
 *
 * @pos encodes the region index in its top bits (see the
 * MTTY_VFIO_PCI_OFFSET_* macros) and the offset within the region in
 * the remainder.  Config space accesses hit the vconfig array directly;
 * BAR accesses are routed to the UART register emulation.  Returns
 * @count on success or a negative value for an unhandled region.
 * Serialized by ops_lock.
 */
static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
			   loff_t pos, bool is_write)
{
	unsigned int index;
	loff_t offset;
	/* NOTE(review): int narrows the ssize_t result; callers only ever
	 * pass count <= 4, so no truncation in practice — confirm if reused.
	 */
	int ret = 0;

	if (!buf)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			 __func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* lazily derive BAR base addresses from config space */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
 719
/*
 * vfio_device init callback: reserve this instance's ports from the
 * global pool and set up the emulated state.
 *
 * The lock-free reservation retries atomic_try_cmpxchg() until it
 * either claims nr_ports ports or observes that too few remain
 * (-ENOSPC).  On allocation failure the claimed ports are returned to
 * the pool before reporting the error.
 */
static int mtty_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	struct mtty_type *type =
		container_of(mdev->type, struct mtty_type, type);
	int avail_ports = atomic_read(&mdev_avail_ports);
	int ret;

	do {
		if (avail_ports < type->nr_ports)
			return -ENOSPC;
	} while (!atomic_try_cmpxchg(&mdev_avail_ports,
				     &avail_ports,
				     avail_ports - type->nr_ports));

	mdev_state->nr_ports = type->nr_ports;
	mdev_state->irq_index = -1;	/* no interrupt configured yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);

	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig) {
		ret = -ENOMEM;
		goto err_nr_ports;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mtty_create_config_space(mdev_state);
	return 0;

err_nr_ports:
	atomic_add(type->nr_ports, &mdev_avail_ports);
	return ret;
}
 758
/*
 * mdev driver probe: allocate the vfio_device-embedding state (which
 * invokes mtty_init_dev() via mtty_dev_ops) and register it with the
 * vfio core as an emulated-IOMMU device.
 */
static int mtty_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mtty_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	/* drops the reference; the release callback frees the state */
	vfio_put_device(&mdev_state->vdev);
	return ret;
}
 779
/*
 * vfio_device release callback (last reference dropped): return this
 * instance's ports to the global pool and free the virtual config
 * space.  The vfio core frees the mdev_state container itself.
 */
static void mtty_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
	kfree(mdev_state->vconfig);
}
 788
/*
 * mdev driver remove: unregister from the vfio core, then drop our
 * reference.  Ordering matters — unregister before put.
 */
static void mtty_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}
 
 
 
 796
/*
 * VFIO_DEVICE_RESET backend.  The sample keeps no state that needs
 * restoring, so this only logs the call and reports success.
 */
static int mtty_reset(struct mdev_state *mdev_state)
{
	pr_info("%s: called\n", __func__);

	return 0;
}
 803
/*
 * vfio read callback: copy device data out to userspace.
 *
 * The transfer is split into naturally aligned 4-, 2- and 1-byte
 * chunks, each serviced by mdev_access() and copied with
 * copy_to_user().  Returns the number of bytes read, or -EFAULT on any
 * access/copy failure (note: partial progress is discarded on error).
 */
static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret =  mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					   *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
 864
/*
 * vfio write callback: copy data from userspace into the device.
 *
 * Mirrors mtty_read(): the transfer is split into naturally aligned
 * 4-, 2- and 1-byte chunks, each fetched with copy_from_user() and
 * handed to mdev_access().  Returns bytes written, or -EFAULT on any
 * failure (note: partial progress is discarded on error).
 */
static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
		   size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
 923
/*
 * VFIO_DEVICE_SET_IRQS backend: wire up or tear down the eventfds used
 * to signal INTx and MSI to userspace.  MASK/UNMASK actions are
 * accepted but ignored; MSIX/ERR/REQ indices are only logged.
 * irq_index records which mechanism is currently active, consumed by
 * mtty_trigger_interrupt().  Serialized by ops_lock.
 *
 * NOTE(review): the eventfd pointers are not cleared after
 * eventfd_ctx_put() on the disable paths — looks like a stale-pointer
 * hazard if the IRQ is re-triggered; confirm against upstream history.
 */
static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx signalling */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* MSI eventfd can only be set once */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
	}
	break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
1016
/*
 * Inject an interrupt into the guest through whichever eventfd is
 * active: MSI when irq_index selects it, INTx otherwise.  Returns the
 * eventfd_signal() result (1 on success) or -EINVAL when no eventfd
 * has been configured for the active index.
 */
static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
{
	int ret = -1;

	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
	    (!mdev_state->msi_evtfd))
		return -EINVAL;
	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
		 (!mdev_state->intx_evtfd)) {
		pr_info("%s: Intr eventfd not found\n", __func__);
		return -EINVAL;
	}

	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
	else
		ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
	pr_info("Intx triggered\n");
#endif
	if (ret != 1)
		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

	return ret;
}
1043
/*
 * VFIO_DEVICE_GET_REGION_INFO backend: report size, offset and access
 * flags for one region.  Only config space, BAR0 and (on dual-port
 * devices) BAR1 are non-empty.  The computed size is also cached in
 * region_info[] for later mdev_access() routing.  cap_type_id and
 * cap_type are accepted for the caller's convenience but never filled
 * in here.
 */
static int mtty_get_region_info(struct mdev_state *mdev_state,
			 struct vfio_region_info *region_info,
			 u16 *cap_type_id, void **cap_type)
{
	unsigned int size = 0;
	u32 bar_index;

	bar_index = region_info->index;
	if (bar_index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	switch (bar_index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		size = MTTY_CONFIG_SPACE_SIZE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		size = MTTY_IO_BAR_SIZE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		/* BAR1 exists only on dual-port instances */
		if (mdev_state->nr_ports == 2)
			size = MTTY_IO_BAR_SIZE;
		break;
	default:
		size = 0;
		break;
	}

	mdev_state->region_info[bar_index].size = size;
	mdev_state->region_info[bar_index].vfio_offset =
		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

	region_info->size = size;
	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
		VFIO_REGION_INFO_FLAG_WRITE;
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}
1084
1085static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
1086{
1087	switch (irq_info->index) {
1088	case VFIO_PCI_INTX_IRQ_INDEX:
1089	case VFIO_PCI_MSI_IRQ_INDEX:
1090	case VFIO_PCI_REQ_IRQ_INDEX:
1091		break;
1092
1093	default:
1094		return -EINVAL;
1095	}
1096
1097	irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1098	irq_info->count = 1;
1099
1100	if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1101		irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1102				VFIO_IRQ_INFO_AUTOMASKED);
1103	else
1104		irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1105
1106	return 0;
1107}
1108
1109static int mtty_get_device_info(struct vfio_device_info *dev_info)
 
1110{
1111	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1112	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1113	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1114
1115	return 0;
1116}
1117
/*
 * vfio ioctl dispatcher implementing the standard VFIO device ioctls:
 * GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET.  Each
 * handler validates the user-supplied argsz against the minimal
 * structure size before acting; unknown commands return -ENOTTY.
 */
static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
			unsigned long arg)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(&info);
		if (ret)
			return ret;

		/* cache the reply; GET_IRQ_INFO/SET_IRQS validate against it */
		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* core helper checks the header and computes the payload size */
		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev_state);
	}
	return -ENOTTY;
}
1233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1234static ssize_t
1235sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1236		     char *buf)
1237{
1238	return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
 
 
 
1239}
1240
static DEVICE_ATTR_RO(sample_mdev_dev);

/* Attributes exposed in each mdev's "vendor" sysfs group. */
static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

/* NULL-terminated list handed to the mdev core via mtty_driver. */
static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
1257
1258static unsigned int mtty_get_available(struct mdev_type *mtype)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1259{
1260	struct mtty_type *type = container_of(mtype, struct mtty_type, type);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1261
1262	return atomic_read(&mdev_avail_ports) / type->nr_ports;
 
 
 
 
 
 
 
 
 
 
 
 
1263}
1264
/* vfio_device callbacks wired to the mtty emulation entry points. */
static const struct vfio_device_ops mtty_dev_ops = {
	.name = "vfio-mtty",
	.init = mtty_init_dev,
	.release = mtty_release_dev,
	.read = mtty_read,
	.write = mtty_write,
	.ioctl = mtty_ioctl,
};

/* mdev driver: bound to each mediated device created under the parent. */
static struct mdev_driver mtty_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.driver = {
		.name = "mtty",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mtty_probe,
	.remove	= mtty_remove,
	.get_available = mtty_get_available,
};
1286
/* Release callback for the statically allocated parent device; nothing
 * to free, but the driver core requires a non-NULL release hook.
 */
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
1291
1292static int __init mtty_dev_init(void)
1293{
1294	int ret = 0;
1295
1296	pr_info("mtty_dev: %s\n", __func__);
1297
1298	memset(&mtty_dev, 0, sizeof(mtty_dev));
1299
1300	idr_init(&mtty_dev.vd_idr);
1301
1302	ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
1303				  MTTY_NAME);
1304
1305	if (ret < 0) {
1306		pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1307		return ret;
1308	}
1309
1310	cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1311	cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
1312
1313	pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1314
1315	ret = mdev_register_driver(&mtty_driver);
1316	if (ret)
1317		goto err_cdev;
1318
1319	mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1320
1321	if (IS_ERR(mtty_dev.vd_class)) {
1322		pr_err("Error: failed to register mtty_dev class\n");
1323		ret = PTR_ERR(mtty_dev.vd_class);
1324		goto err_driver;
1325	}
1326
1327	mtty_dev.dev.class = mtty_dev.vd_class;
1328	mtty_dev.dev.release = mtty_device_release;
1329	dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1330
1331	ret = device_register(&mtty_dev.dev);
1332	if (ret)
1333		goto err_put;
1334
1335	ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev,
1336				   &mtty_driver, mtty_mdev_types,
1337				   ARRAY_SIZE(mtty_mdev_types));
1338	if (ret)
1339		goto err_device;
1340	return 0;
 
 
 
 
1341
1342err_device:
1343	device_del(&mtty_dev.dev);
1344err_put:
1345	put_device(&mtty_dev.dev);
1346	class_destroy(mtty_dev.vd_class);
1347err_driver:
1348	mdev_unregister_driver(&mtty_driver);
1349err_cdev:
1350	cdev_del(&mtty_dev.vd_cdev);
1351	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
 
 
1352	return ret;
1353}
1354
/*
 * Module exit: unwind mtty_dev_init() in reverse order.  The mdev parent
 * must be unregistered before the device it was registered on is gone.
 */
static void __exit mtty_dev_exit(void)
{
	/* NOTE(review): dev.bus is never assigned in this file — confirm
	 * why it is cleared here before unregistering.
	 */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_parent(&mtty_dev.parent);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	mdev_unregister_driver(&mtty_driver);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
1369
1370module_init(mtty_dev_init)
1371module_exit(mtty_dev_exit)
1372
1373MODULE_LICENSE("GPL v2");
1374MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
1375MODULE_VERSION(VERSION_STRING);
1376MODULE_AUTHOR(DRIVER_AUTHOR);
/* ===== older revision of this driver (v4.17) follows for reference ===== */
   1/*
   2 * Mediated virtual PCI serial host device driver
   3 *
   4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
   5 *     Author: Neo Jia <cjia@nvidia.com>
   6 *             Kirti Wankhede <kwankhede@nvidia.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 * Sample driver that creates mdev device that simulates serial port over PCI
  13 * card.
  14 *
  15 */
  16
  17#include <linux/init.h>
  18#include <linux/module.h>
  19#include <linux/device.h>
  20#include <linux/kernel.h>
  21#include <linux/fs.h>
  22#include <linux/poll.h>
  23#include <linux/slab.h>
  24#include <linux/cdev.h>
  25#include <linux/sched.h>
  26#include <linux/wait.h>
  27#include <linux/uuid.h>
  28#include <linux/vfio.h>
  29#include <linux/iommu.h>
  30#include <linux/sysfs.h>
  31#include <linux/ctype.h>
  32#include <linux/file.h>
  33#include <linux/mdev.h>
  34#include <linux/pci.h>
  35#include <linux/serial.h>
  36#include <uapi/linux/serial_reg.h>
  37#include <linux/eventfd.h>
  38/*
  39 * #defines
  40 */
  41
  42#define VERSION_STRING  "0.1"
  43#define DRIVER_AUTHOR   "NVIDIA Corporation"
  44
  45#define MTTY_CLASS_NAME "mtty"
  46
  47#define MTTY_NAME       "mtty"
  48
  49#define MTTY_STRING_LEN		16
  50
  51#define MTTY_CONFIG_SPACE_SIZE  0xff
  52#define MTTY_IO_BAR_SIZE        0x8
  53#define MTTY_MMIO_BAR_SIZE      0x100000
  54
  55#define STORE_LE16(addr, val)   (*(u16 *)addr = val)
  56#define STORE_LE32(addr, val)   (*(u32 *)addr = val)
  57
  58#define MAX_FIFO_SIZE   16
  59
  60#define CIRCULAR_BUF_INC_IDX(idx)    (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
  61
  62#define MTTY_VFIO_PCI_OFFSET_SHIFT   40
  63
  64#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
  65#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
  66				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
  67#define MTTY_VFIO_PCI_OFFSET_MASK    \
  68				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
  69#define MAX_MTTYS	24
  70
/*
 * Global Structures
 */

/* Module-wide bookkeeping: char-dev region, class and parent device. */
struct mtty_dev {
	dev_t		vd_devt;
	struct class	*vd_class;
	struct cdev	vd_cdev;
	struct idr	vd_idr;
	struct device	dev;
} mtty_dev;

/* Cached geometry of one VFIO region (config space or a BAR). */
struct mdev_region_info {
	u64 start;
	u64 phys_start;
	u32 size;
	u64 vfio_offset;
};

#if defined(DEBUG_REGS)
/* UART register names indexed by register offset, for debug tracing. */
const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif

/* loop back buffer */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];
	u8 head, tail;	/* circular indices: head = write, tail = read */
	u8 count;	/* bytes currently queued */
};

/* One emulated 16550-style serial port. */
struct serial_port {
	u8 uart_reg[8];         /* 8 registers */
	struct rxtx rxtx;       /* loop back buffer */
	bool dlab;
	bool overrun;
	u16 divisor;
	u8 fcr;                 /* FIFO control register */
	u8 max_fifo_size;
	u8 intr_trigger_level;  /* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	int irq_fd;
	struct eventfd_ctx *intx_evtfd;
	struct eventfd_ctx *msi_evtfd;
	int irq_index;
	u8 *vconfig;	/* virtual PCI config space, MTTY_CONFIG_SPACE_SIZE */
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];
	struct list_head next;
	struct serial_port s[2];
	struct mutex rxtx_lock;
	struct vfio_device_info dev_info;
	int nr_ports;
};

/* All live mdev instances; additions/removals under mdev_list_lock. */
struct mutex mdev_list_lock;
struct list_head mdev_devices_list;

static const struct file_operations vd_fops = {
	.owner          = THIS_MODULE,
};

/* function prototypes */

static int mtty_trigger_interrupt(uuid_le uuid);
 160
/* Helper functions */
/*
 * Look up the mdev_state whose mdev carries @uuid; NULL if not found.
 * NOTE(review): walks mdev_devices_list without taking mdev_list_lock —
 * confirm callers cannot race with mtty_create()/mtty_remove().
 */
static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
{
	struct mdev_state *mds;

	list_for_each_entry(mds, &mdev_devices_list, next) {
		if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
			return mds;
	}

	return NULL;
}
 173
 174void dump_buffer(char *buf, uint32_t count)
 175{
 176#if defined(DEBUG)
 177	int i;
 178
 179	pr_info("Buffer:\n");
 180	for (i = 0; i < count; i++) {
 181		pr_info("%2x ", *(buf + i));
 182		if ((i + 1) % 16 == 0)
 183			pr_info("\n");
 184	}
 185#endif
 186}
 187
/*
 * Initialize the virtual PCI configuration space for one instance:
 * fake vendor/device ID, 16550-compatible serial class code, one I/O
 * BAR per emulated port, INTA# interrupt pin and vendor-specific bytes.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] =  0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] =  0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] =  0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] =  0x07;

	/* base address registers */
	/* BAR0: IO space */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	/* second port gets its own I/O BAR only on the 2-port type */
	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] =  0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] =  0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] =  0x23;
	mdev_state->vconfig[0x43] =  0x80;
	mdev_state->vconfig[0x44] =  0x23;
	mdev_state->vconfig[0x48] =  0x23;
	mdev_state->vconfig[0x4c] =  0x23;

	/* bytes 0x60..0x6e spell "PCI Serial/UART" */
	mdev_state->vconfig[0x60] =  0x50;
	mdev_state->vconfig[0x61] =  0x43;
	mdev_state->vconfig[0x62] =  0x49;
	mdev_state->vconfig[0x63] =  0x20;
	mdev_state->vconfig[0x64] =  0x53;
	mdev_state->vconfig[0x65] =  0x65;
	mdev_state->vconfig[0x66] =  0x72;
	mdev_state->vconfig[0x67] =  0x69;
	mdev_state->vconfig[0x68] =  0x61;
	mdev_state->vconfig[0x69] =  0x6c;
	mdev_state->vconfig[0x6a] =  0x2f;
	mdev_state->vconfig[0x6b] =  0x55;
	mdev_state->vconfig[0x6c] =  0x41;
	mdev_state->vconfig[0x6d] =  0x52;
	mdev_state->vconfig[0x6e] =  0x54;
}
 251
/*
 * Emulate a guest write of @count bytes to PCI config space at @offset.
 * BAR writes implement the standard sizing protocol (writing all-ones
 * makes the next read return the size mask); BARs 2-4 and the second
 * port's BAR on a 1-port device read back as zero.
 * NOTE(review): the BAR path reads a full u32 from @buf regardless of
 * @count — confirm callers never issue partial BAR writes.
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c:  /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10:  /* BAR0 */
	case 0x14:  /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/* sizing probe: reflect the BAR size mask back to the guest */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* preserve the read-only low type bits of the BAR */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18:  /* BAR2 */
	case 0x1c:  /* BAR3 */
	case 0x20:  /* BAR4 */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
 305
/*
 * Emulate a guest write to one UART register of BAR @index.
 * @offset selects the 16550 register; *@buf holds the byte written.
 * Loop-back semantics: bytes written to TX are queued into the port's
 * rx/tx FIFO and, depending on IER/FCR state, assert the virtual IRQ.
 * FIFO state is protected by rxtx_lock.
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
				u16 offset, char *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			   (mdev_state->s[index].rxtx.count ==
				    mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
								UART_IER_RLSI)
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/* THR-empty interrupt fires as soon as it's enabled
			 * on an empty FIFO
			 */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or  implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Recevice data timeout in IIR register
		 */
		/* NOTE(review): this unconditionally overrides the trigger
		 * level selected by the switch above — confirm intended.
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
 476
/*
 * Emulate a guest read from one UART register of BAR @index; the result
 * is returned in *@buf.  Reading RX drains the loop-back FIFO and, once
 * it empties, may assert the THR-empty interrupt.  FIFO state is
 * protected by rxtx_lock.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, char *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf  = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
		/*
		 *  Trigger interrupt if tx buffer empty interrupt is
		 *  enabled and fifo is empty
		 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							 UART_IER_THRI)
				mtty_trigger_interrupt(
					mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priotiry 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priotiry 4: Modem status: CTS, DSR, RI or DCD  */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				 (UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* atleast one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and tramsitter empty */
		if (mdev_state->s[index].rxtx.head ==
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						 UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
 615
/*
 * Refresh region_info[].start for every BAR from the guest-programmed
 * values in virtual config space, combining the high dword for 64-bit
 * memory BARs (which consume two config slots).
 * NOTE(review): PCI_BASE_ADDRESS_MEM_MASK is applied even though the
 * mtty BARs are I/O BARs (whose address mask is ~0x3) — confirm this is
 * intended.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;	/* 64-bit BAR uses the next slot too */
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
 652
/*
 * Route one guest access (@count bytes at device offset @pos) to the
 * right emulation: PCI config space or one of the I/O BARs.  The VFIO
 * region index is encoded in the high bits of @pos.  Serialized by
 * ops_lock.
 * Returns @count on success.  NOTE(review): an unknown region yields -1
 * rather than a -errno; callers here only test for <= 0 — confirm that
 * is sufficient.
 */
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state;
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!mdev || !buf)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state) {
		pr_err("%s mdev_state not found\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			 __func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* lazily refresh BAR bases on first access */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
 729
/*
 * mdev "create" callback: invoked when userspace writes a UUID to the
 * type's sysfs create node.  Derives the port count (1 or 2) from the
 * type name ("<driver>-1"/"<driver>-2"), allocates per-device state and
 * virtual config space, and links the instance into the global list.
 * Returns 0, -EINVAL for an unknown type, or -ENOMEM.
 */
int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	char name[MTTY_STRING_LEN];
	int nr_ports = 0, i;

	if (!mdev)
		return -EINVAL;

	/* match the sysfs type name against "<driver>-1" / "<driver>-2" */
	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			dev_driver_string(mdev_parent_dev(mdev)), i + 1);
		if (!strcmp(kobj->name, name)) {
			nr_ports = i + 1;
			break;
		}
	}

	if (!nr_ports)
		return -EINVAL;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;	/* no interrupt wired up yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mtty_create_config_space(mdev_state);

	mutex_lock(&mdev_list_lock);
	list_add(&mdev_state->next, &mdev_devices_list);
	mutex_unlock(&mdev_list_lock);

	return 0;
}
 779
 780int mtty_remove(struct mdev_device *mdev)
 781{
 782	struct mdev_state *mds, *tmp_mds;
 783	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
 784	int ret = -EINVAL;
 785
 786	mutex_lock(&mdev_list_lock);
 787	list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
 788		if (mdev_state == mds) {
 789			list_del(&mdev_state->next);
 790			mdev_set_drvdata(mdev, NULL);
 791			kfree(mdev_state->vconfig);
 792			kfree(mdev_state);
 793			ret = 0;
 794			break;
 795		}
 796	}
 797	mutex_unlock(&mdev_list_lock);
 798
 799	return ret;
 
 800}
 801
 802int mtty_reset(struct mdev_device *mdev)
 803{
 804	struct mdev_state *mdev_state;
 805
 806	if (!mdev)
 807		return -EINVAL;
 808
 809	mdev_state = mdev_get_drvdata(mdev);
 810	if (!mdev_state)
 811		return -EINVAL;
 812
 
 
 813	pr_info("%s: called\n", __func__);
 814
 815	return 0;
 816}
 817
/*
 * VFIO read callback: copy device data at *ppos out to userspace.
 * Splits the request into naturally aligned 4/2/1-byte accesses and
 * forwards each to mdev_access().  Returns the number of bytes read.
 * NOTE(review): any failure returns -EFAULT and discards partial
 * progress, even when mdev_access() failed for a non-fault reason.
 */
ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
		  loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret =  mdev_access(mdev, (char *)&val, sizeof(val),
					   *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
 876
/*
 * VFIO write callback: copy userspace data in and apply it at *ppos.
 * Mirror of mtty_read(): naturally aligned 4/2/1-byte chunks are each
 * forwarded to mdev_access().  Returns the number of bytes written.
 * NOTE(review): as with mtty_read(), every failure collapses to -EFAULT
 * and partial progress is discarded.
 */
ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
		   size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
 933
/*
 * VFIO SET_IRQS backend: wire up or tear down the eventfds used to
 * inject INTx/MSI interrupts into the guest.  MASK/UNMASK actions are
 * accepted but ignored; MSI-X/ERR/REQ indexes are only logged.
 * Serialized by ops_lock.
 * NOTE(review): the INTx/MSI disable paths put the eventfd ctx but leave
 * the stale pointer in intx_evtfd/msi_evtfd — confirm there is no later
 * use after the put.
 */
static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx delivery */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* already armed: keep the existing eventfd */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
	}
	break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
1034
1035static int mtty_trigger_interrupt(uuid_le uuid)
1036{
1037	int ret = -1;
1038	struct mdev_state *mdev_state;
1039
1040	mdev_state = find_mdev_state_by_uuid(uuid);
1041
1042	if (!mdev_state) {
1043		pr_info("%s: mdev not found\n", __func__);
1044		return -EINVAL;
1045	}
1046
1047	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
1048	    (!mdev_state->msi_evtfd))
1049		return -EINVAL;
1050	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
1051		 (!mdev_state->intx_evtfd)) {
1052		pr_info("%s: Intr eventfd not found\n", __func__);
1053		return -EINVAL;
1054	}
1055
1056	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
1057		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
1058	else
1059		ret = eventfd_signal(mdev_state->intx_evtfd, 1);
1060
1061#if defined(DEBUG_INTR)
1062	pr_info("Intx triggered\n");
1063#endif
1064	if (ret != 1)
1065		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
1066
1067	return ret;
1068}
1069
1070int mtty_get_region_info(struct mdev_device *mdev,
1071			 struct vfio_region_info *region_info,
1072			 u16 *cap_type_id, void **cap_type)
1073{
1074	unsigned int size = 0;
1075	struct mdev_state *mdev_state;
1076	u32 bar_index;
1077
1078	if (!mdev)
1079		return -EINVAL;
1080
1081	mdev_state = mdev_get_drvdata(mdev);
1082	if (!mdev_state)
1083		return -EINVAL;
1084
1085	bar_index = region_info->index;
1086	if (bar_index >= VFIO_PCI_NUM_REGIONS)
1087		return -EINVAL;
1088
1089	mutex_lock(&mdev_state->ops_lock);
1090
1091	switch (bar_index) {
1092	case VFIO_PCI_CONFIG_REGION_INDEX:
1093		size = MTTY_CONFIG_SPACE_SIZE;
1094		break;
1095	case VFIO_PCI_BAR0_REGION_INDEX:
1096		size = MTTY_IO_BAR_SIZE;
1097		break;
1098	case VFIO_PCI_BAR1_REGION_INDEX:
1099		if (mdev_state->nr_ports == 2)
1100			size = MTTY_IO_BAR_SIZE;
1101		break;
1102	default:
1103		size = 0;
1104		break;
1105	}
1106
1107	mdev_state->region_info[bar_index].size = size;
1108	mdev_state->region_info[bar_index].vfio_offset =
1109		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1110
1111	region_info->size = size;
1112	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1113	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
1114		VFIO_REGION_INFO_FLAG_WRITE;
1115	mutex_unlock(&mdev_state->ops_lock);
1116	return 0;
1117}
1118
1119int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
1120{
1121	switch (irq_info->index) {
1122	case VFIO_PCI_INTX_IRQ_INDEX:
1123	case VFIO_PCI_MSI_IRQ_INDEX:
1124	case VFIO_PCI_REQ_IRQ_INDEX:
1125		break;
1126
1127	default:
1128		return -EINVAL;
1129	}
1130
1131	irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1132	irq_info->count = 1;
1133
1134	if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1135		irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1136				VFIO_IRQ_INFO_AUTOMASKED);
1137	else
1138		irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1139
1140	return 0;
1141}
1142
1143int mtty_get_device_info(struct mdev_device *mdev,
1144			 struct vfio_device_info *dev_info)
1145{
1146	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1147	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1148	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1149
1150	return 0;
1151}
1152
/*
 * VFIO device ioctl handler for the mdev.
 *
 * Implements GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and
 * RESET; anything else returns -ENOTTY.  Per the VFIO argsz protocol,
 * each GET_* command copies only the caller-advertised structure
 * prefix (minsz) in and out.
 */
static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
			unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -ENODEV;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(mdev, &info);
		if (ret)
			return ret;

		/*
		 * Cache the reply; num_irqs is reused below to bound the
		 * index in GET_IRQ_INFO and SET_IRQS.
		 */
		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		/* cap_type/cap_type_id are accepted but unused here. */
		ret = mtty_get_region_info(mdev, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(mdev, &info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* Validates flags/index/count and computes payload size. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			/* Payload (e.g. eventfds) follows the header. */
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev);
	}
	return -ENOTTY;
}
1274
/* mdev open callback: nothing to set up per-open, just trace the call. */
int mtty_open(struct mdev_device *mdev)
{
	pr_info("%s\n", __func__);
	return 0;
}
1280
/* mdev release callback: no per-open state to tear down, just trace. */
void mtty_close(struct mdev_device *mdev)
{
	pr_info("%s\n", __func__);
}
1285
1286static ssize_t
1287sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
1288		     char *buf)
1289{
1290	return sprintf(buf, "This is phy device\n");
1291}
1292
static DEVICE_ATTR_RO(sample_mtty_dev);

/* Attributes exposed on the physical (parent) device. */
static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

/* Shown under <parent>/mtty_dev/ in sysfs. */
static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

/* Wired into mdev_fops.dev_attr_groups below. */
const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};
1309
1310static ssize_t
1311sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1312		     char *buf)
1313{
1314	if (mdev_from_dev(dev))
1315		return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
1316
1317	return sprintf(buf, "\n");
1318}
1319
static DEVICE_ATTR_RO(sample_mdev_dev);

/* Attributes exposed on every created mdev device. */
static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

/* Shown under <mdev>/vendor/ in sysfs. */
static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

/* Wired into mdev_fops.mdev_attr_groups below. */
const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
1336
1337static ssize_t
1338name_show(struct kobject *kobj, struct device *dev, char *buf)
1339{
1340	char name[MTTY_STRING_LEN];
1341	int i;
1342	const char *name_str[2] = {"Single port serial", "Dual port serial"};
1343
1344	for (i = 0; i < 2; i++) {
1345		snprintf(name, MTTY_STRING_LEN, "%s-%d",
1346			 dev_driver_string(dev), i + 1);
1347		if (!strcmp(kobj->name, name))
1348			return sprintf(buf, "%s\n", name_str[i]);
1349	}
1350
1351	return -EINVAL;
1352}
1353
1354MDEV_TYPE_ATTR_RO(name);
1355
1356static ssize_t
1357available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
1358{
1359	char name[MTTY_STRING_LEN];
1360	int i;
1361	struct mdev_state *mds;
1362	int ports = 0, used = 0;
1363
1364	for (i = 0; i < 2; i++) {
1365		snprintf(name, MTTY_STRING_LEN, "%s-%d",
1366			 dev_driver_string(dev), i + 1);
1367		if (!strcmp(kobj->name, name)) {
1368			ports = i + 1;
1369			break;
1370		}
1371	}
1372
1373	if (!ports)
1374		return -EINVAL;
1375
1376	list_for_each_entry(mds, &mdev_devices_list, next)
1377		used += mds->nr_ports;
1378
1379	return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
1380}
1381
1382MDEV_TYPE_ATTR_RO(available_instances);
1383
1384
1385static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
1386			       char *buf)
1387{
1388	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1389}
1390
1391MDEV_TYPE_ATTR_RO(device_api);
1392
/* Attributes shared by both supported mdev types. */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* Type "1": single-port serial device. */
static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

/* Type "2": dual-port serial device. */
static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

/* Wired into mdev_fops.supported_type_groups below. */
struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};
1415
/* mdev parent ops: wires the mtty callbacks into the mdev core. */
static const struct mdev_parent_ops mdev_fops = {
	.owner                  = THIS_MODULE,
	.dev_attr_groups        = mtty_dev_groups,
	.mdev_attr_groups       = mdev_dev_groups,
	.supported_type_groups  = mdev_type_groups,
	.create                 = mtty_create,
	.remove			= mtty_remove,
	.open                   = mtty_open,
	.release                = mtty_close,
	.read                   = mtty_read,
	.write                  = mtty_write,
	.ioctl		        = mtty_ioctl,
};
1429
/* Release callback for the parent device; no resources to free. */
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
1434
1435static int __init mtty_dev_init(void)
1436{
1437	int ret = 0;
1438
1439	pr_info("mtty_dev: %s\n", __func__);
1440
1441	memset(&mtty_dev, 0, sizeof(mtty_dev));
1442
1443	idr_init(&mtty_dev.vd_idr);
1444
1445	ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
 
1446
1447	if (ret < 0) {
1448		pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1449		return ret;
1450	}
1451
1452	cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1453	cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
1454
1455	pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1456
 
 
 
 
1457	mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1458
1459	if (IS_ERR(mtty_dev.vd_class)) {
1460		pr_err("Error: failed to register mtty_dev class\n");
1461		ret = PTR_ERR(mtty_dev.vd_class);
1462		goto failed1;
1463	}
1464
1465	mtty_dev.dev.class = mtty_dev.vd_class;
1466	mtty_dev.dev.release = mtty_device_release;
1467	dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1468
1469	ret = device_register(&mtty_dev.dev);
1470	if (ret)
1471		goto failed2;
1472
1473	ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
 
 
1474	if (ret)
1475		goto failed3;
1476
1477	mutex_init(&mdev_list_lock);
1478	INIT_LIST_HEAD(&mdev_devices_list);
1479
1480	goto all_done;
1481
1482failed3:
1483
1484	device_unregister(&mtty_dev.dev);
1485failed2:
1486	class_destroy(mtty_dev.vd_class);
1487
1488failed1:
 
1489	cdev_del(&mtty_dev.vd_cdev);
1490	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
1491
1492all_done:
1493	return ret;
1494}
1495
/*
 * Module exit: unwind mtty_dev_init() in reverse order.
 * Unregistering from the mdev core first tears down any remaining
 * child mdevs before the parent device itself goes away.
 */
static void __exit mtty_dev_exit(void)
{
	/* NOTE(review): clearing dev.bus before unregister looks like a
	 * workaround for mdev core teardown — confirm it is still needed. */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
1509
/* Module entry/exit hookup and metadata. */
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);