drivers/misc/pci_endpoint_test.c (v6.8)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Host side test driver to test endpoint functionality
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/crc32.h>
  10#include <linux/delay.h>
  11#include <linux/fs.h>
  12#include <linux/io.h>
  13#include <linux/interrupt.h>
  14#include <linux/irq.h>
  15#include <linux/miscdevice.h>
  16#include <linux/module.h>
  17#include <linux/mutex.h>
  18#include <linux/random.h>
  19#include <linux/slab.h>
  20#include <linux/uaccess.h>
  21#include <linux/pci.h>
  22#include <linux/pci_ids.h>
  23
  24#include <linux/pci_regs.h>
  25
  26#include <uapi/linux/pcitest.h>
  27
  28#define DRV_MODULE_NAME				"pci-endpoint-test"
  29
  30#define IRQ_TYPE_UNDEFINED			-1
  31#define IRQ_TYPE_INTX				0
  32#define IRQ_TYPE_MSI				1
  33#define IRQ_TYPE_MSIX				2
  34
  35#define PCI_ENDPOINT_TEST_MAGIC			0x0
  36
  37#define PCI_ENDPOINT_TEST_COMMAND		0x4
  38#define COMMAND_RAISE_INTX_IRQ			BIT(0)
  39#define COMMAND_RAISE_MSI_IRQ			BIT(1)
  40#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
  41#define COMMAND_READ				BIT(3)
  42#define COMMAND_WRITE				BIT(4)
  43#define COMMAND_COPY				BIT(5)
  44
  45#define PCI_ENDPOINT_TEST_STATUS		0x8
  46#define STATUS_READ_SUCCESS			BIT(0)
  47#define STATUS_READ_FAIL			BIT(1)
  48#define STATUS_WRITE_SUCCESS			BIT(2)
  49#define STATUS_WRITE_FAIL			BIT(3)
  50#define STATUS_COPY_SUCCESS			BIT(4)
  51#define STATUS_COPY_FAIL			BIT(5)
  52#define STATUS_IRQ_RAISED			BIT(6)
  53#define STATUS_SRC_ADDR_INVALID			BIT(7)
  54#define STATUS_DST_ADDR_INVALID			BIT(8)
  55
  56#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
  57#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
  58
  59#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
  60#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
  61
  62#define PCI_ENDPOINT_TEST_SIZE			0x1c
  63#define PCI_ENDPOINT_TEST_CHECKSUM		0x20
  64
  65#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
  66#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28
  67
  68#define PCI_ENDPOINT_TEST_FLAGS			0x2c
  69#define FLAG_USE_DMA				BIT(0)
  70
  71#define PCI_DEVICE_ID_TI_AM654			0xb00c
  72#define PCI_DEVICE_ID_TI_J7200			0xb00f
  73#define PCI_DEVICE_ID_TI_AM64			0xb010
  74#define PCI_DEVICE_ID_TI_J721S2		0xb013
  75#define PCI_DEVICE_ID_LS1088A			0x80c0
  76#define PCI_DEVICE_ID_IMX8			0x0808
  77
  78#define is_am654_pci_dev(pdev)		\
  79		((pdev)->device == PCI_DEVICE_ID_TI_AM654)
  80
  81#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
  82#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
  83#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
  84#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
  85#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031
  86
  87static DEFINE_IDA(pci_endpoint_test_ida);
  88
  89#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
  90					    miscdev)
  91
  92static bool no_msi;
  93module_param(no_msi, bool, 0444);
  94MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
  95
  96static int irq_type = IRQ_TYPE_MSI;
  97module_param(irq_type, int, 0444);
  98MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
  99
 100enum pci_barno {
 101	BAR_0,
 102	BAR_1,
 103	BAR_2,
 104	BAR_3,
 105	BAR_4,
 106	BAR_5,
 107};
 108
 109struct pci_endpoint_test {
 110	struct pci_dev	*pdev;
 111	void __iomem	*base;
 112	void __iomem	*bar[PCI_STD_NUM_BARS];
 113	struct completion irq_raised;
 114	int		last_irq;
 115	int		num_irqs;
 116	int		irq_type;
 117	/* mutex to protect the ioctls */
 118	struct mutex	mutex;
 119	struct miscdevice miscdev;
 120	enum pci_barno test_reg_bar;
 121	size_t alignment;
 122	const char *name;
 123};
 124
 125struct pci_endpoint_test_data {
 126	enum pci_barno test_reg_bar;
 127	size_t alignment;
 128	int irq_type;
 129};
 130
 131static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
 132					  u32 offset)
 133{
 134	return readl(test->base + offset);
 135}
 136
 137static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
 138					    u32 offset, u32 value)
 139{
 140	writel(value, test->base + offset);
 141}
 142
 143static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
 144					      int bar, int offset)
 145{
 146	return readl(test->bar[bar] + offset);
 147}
 148
 149static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
 150						int bar, u32 offset, u32 value)
 151{
 152	writel(value, test->bar[bar] + offset);
 153}
 154
 155static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
 156{
 157	struct pci_endpoint_test *test = dev_id;
 158	u32 reg;
 159
 160	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
 161	if (reg & STATUS_IRQ_RAISED) {
 162		test->last_irq = irq;
 163		complete(&test->irq_raised);
 164	}
 165
 166	return IRQ_HANDLED;
 167}
 168
 169static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
 170{
 171	struct pci_dev *pdev = test->pdev;
 172
 173	pci_free_irq_vectors(pdev);
 174	test->irq_type = IRQ_TYPE_UNDEFINED;
 175}
 176
 177static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
 178						int type)
 179{
 180	int irq = -1;
 181	struct pci_dev *pdev = test->pdev;
 182	struct device *dev = &pdev->dev;
 183	bool res = true;
 184
 185	switch (type) {
 186	case IRQ_TYPE_INTX:
 187		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
 188		if (irq < 0)
 189			dev_err(dev, "Failed to get Legacy interrupt\n");
 190		break;
 191	case IRQ_TYPE_MSI:
 192		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
 193		if (irq < 0)
 194			dev_err(dev, "Failed to get MSI interrupts\n");
 195		break;
 196	case IRQ_TYPE_MSIX:
 197		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
 198		if (irq < 0)
 199			dev_err(dev, "Failed to get MSI-X interrupts\n");
 200		break;
 201	default:
 202		dev_err(dev, "Invalid IRQ type selected\n");
 203	}
 204
 205	if (irq < 0) {
 206		irq = 0;
 207		res = false;
 208	}
 209
 210	test->irq_type = type;
 211	test->num_irqs = irq;
 212
 213	return res;
 214}
 215
 216static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
 217{
 218	int i;
 219	struct pci_dev *pdev = test->pdev;
 220	struct device *dev = &pdev->dev;
 221
 222	for (i = 0; i < test->num_irqs; i++)
 223		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
 224
 225	test->num_irqs = 0;
 226}
 227
 228static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
 229{
 230	int i;
 231	int err;
 232	struct pci_dev *pdev = test->pdev;
 233	struct device *dev = &pdev->dev;
 234
 235	for (i = 0; i < test->num_irqs; i++) {
 236		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
 237				       pci_endpoint_test_irqhandler,
 238				       IRQF_SHARED, test->name, test);
 239		if (err)
 240			goto fail;
 241	}
 242
 243	return true;
 244
 245fail:
 246	switch (irq_type) {
 247	case IRQ_TYPE_INTX:
 248		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
 249			pci_irq_vector(pdev, i));
 250		break;
 251	case IRQ_TYPE_MSI:
 252		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
 253			pci_irq_vector(pdev, i),
 254			i + 1);
 255		break;
 256	case IRQ_TYPE_MSIX:
 257		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
 258			pci_irq_vector(pdev, i),
 259			i + 1);
 260		break;
 261	}
 262
 263	return false;
 264}
 265
 266static const u32 bar_test_pattern[] = {
 267	0xA0A0A0A0,
 268	0xA1A1A1A1,
 269	0xA2A2A2A2,
 270	0xA3A3A3A3,
 271	0xA4A4A4A4,
 272	0xA5A5A5A5,
 273};
 274
 275static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
 276				  enum pci_barno barno)
 277{
 278	int j;
 279	u32 val;
 280	int size;
 281	struct pci_dev *pdev = test->pdev;
 282
 283	if (!test->bar[barno])
 284		return false;
 285
 286	size = pci_resource_len(pdev, barno);
 287
 288	if (barno == test->test_reg_bar)
 289		size = 0x4;
 290
 291	for (j = 0; j < size; j += 4)
 292		pci_endpoint_test_bar_writel(test, barno, j,
 293					     bar_test_pattern[barno]);
 294
 295	for (j = 0; j < size; j += 4) {
 296		val = pci_endpoint_test_bar_readl(test, barno, j);
 297		if (val != bar_test_pattern[barno])
 298			return false;
 299	}
 300
 301	return true;
 302}
 303
 304static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
 305{
 306	u32 val;
 307
 308	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
 309				 IRQ_TYPE_INTX);
 310	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
 311	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 312				 COMMAND_RAISE_INTX_IRQ);
 313	val = wait_for_completion_timeout(&test->irq_raised,
 314					  msecs_to_jiffies(1000));
 315	if (!val)
 316		return false;
 317
 318	return true;
 319}
 320
 321static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
 322				       u16 msi_num, bool msix)
 323{
 324	u32 val;
 325	struct pci_dev *pdev = test->pdev;
 326
 327	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
 328				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
 329	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
 330	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 331				 msix ? COMMAND_RAISE_MSIX_IRQ :
 332				 COMMAND_RAISE_MSI_IRQ);
 333	val = wait_for_completion_timeout(&test->irq_raised,
 334					  msecs_to_jiffies(1000));
 335	if (!val)
 336		return false;
 337
 338	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
 339}
 340
 341static int pci_endpoint_test_validate_xfer_params(struct device *dev,
 342		struct pci_endpoint_test_xfer_param *param, size_t alignment)
 343{
 344	if (!param->size) {
 345		dev_dbg(dev, "Data size is zero\n");
 346		return -EINVAL;
 347	}
 348
 349	if (param->size > SIZE_MAX - alignment) {
 350		dev_dbg(dev, "Maximum transfer data size exceeded\n");
 351		return -EINVAL;
 352	}
 353
 354	return 0;
 355}
 356
 357static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
 358				   unsigned long arg)
 359{
 360	struct pci_endpoint_test_xfer_param param;
 361	bool ret = false;
 362	void *src_addr;
 363	void *dst_addr;
 364	u32 flags = 0;
 365	bool use_dma;
 366	size_t size;
 367	dma_addr_t src_phys_addr;
 368	dma_addr_t dst_phys_addr;
 369	struct pci_dev *pdev = test->pdev;
 370	struct device *dev = &pdev->dev;
 371	void *orig_src_addr;
 372	dma_addr_t orig_src_phys_addr;
 373	void *orig_dst_addr;
 374	dma_addr_t orig_dst_phys_addr;
 375	size_t offset;
 376	size_t alignment = test->alignment;
 377	int irq_type = test->irq_type;
 378	u32 src_crc32;
 379	u32 dst_crc32;
 380	int err;
 381
 382	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
 383	if (err) {
 384		dev_err(dev, "Failed to get transfer param\n");
 385		return false;
 386	}
 387
 388	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
 389	if (err)
 390		return false;
 391
 392	size = param.size;
 393
 394	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
 395	if (use_dma)
 396		flags |= FLAG_USE_DMA;
 397
 398	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
 399		dev_err(dev, "Invalid IRQ type option\n");
 400		goto err;
 401	}
 402
 403	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
 404	if (!orig_src_addr) {
 405		dev_err(dev, "Failed to allocate source buffer\n");
 406		ret = false;
 407		goto err;
 408	}
 409
 410	get_random_bytes(orig_src_addr, size + alignment);
 411	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
 412					    size + alignment, DMA_TO_DEVICE);
 413	if (dma_mapping_error(dev, orig_src_phys_addr)) {
 414		dev_err(dev, "failed to map source buffer address\n");
 415		ret = false;
 416		goto err_src_phys_addr;
 417	}
 418
 419	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
 420		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
 421		offset = src_phys_addr - orig_src_phys_addr;
 422		src_addr = orig_src_addr + offset;
 423	} else {
 424		src_phys_addr = orig_src_phys_addr;
 425		src_addr = orig_src_addr;
 426	}
 427
 428	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
 429				 lower_32_bits(src_phys_addr));
 430
 431	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
 432				 upper_32_bits(src_phys_addr));
 433
 434	src_crc32 = crc32_le(~0, src_addr, size);
 435
 436	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
 437	if (!orig_dst_addr) {
 438		dev_err(dev, "Failed to allocate destination address\n");
 439		ret = false;
 440		goto err_dst_addr;
 441	}
 442
 443	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
 444					    size + alignment, DMA_FROM_DEVICE);
 445	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
 446		dev_err(dev, "failed to map destination buffer address\n");
 447		ret = false;
 448		goto err_dst_phys_addr;
 449	}
 450
 451	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
 452		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
 453		offset = dst_phys_addr - orig_dst_phys_addr;
 454		dst_addr = orig_dst_addr + offset;
 455	} else {
 456		dst_phys_addr = orig_dst_phys_addr;
 457		dst_addr = orig_dst_addr;
 458	}
 459
 460	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
 461				 lower_32_bits(dst_phys_addr));
 462	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
 463				 upper_32_bits(dst_phys_addr));
 464
 465	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
 466				 size);
 467
 468	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
 469	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
 470	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
 471	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 472				 COMMAND_COPY);
 473
 474	wait_for_completion(&test->irq_raised);
 475
 476	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
 477			 DMA_FROM_DEVICE);
 478
 479	dst_crc32 = crc32_le(~0, dst_addr, size);
 480	if (dst_crc32 == src_crc32)
 481		ret = true;
 482
 483err_dst_phys_addr:
 484	kfree(orig_dst_addr);
 485
 486err_dst_addr:
 487	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
 488			 DMA_TO_DEVICE);
 489
 490err_src_phys_addr:
 491	kfree(orig_src_addr);
 492
 493err:
 494	return ret;
 495}
 496
 497static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
 498				    unsigned long arg)
 499{
 500	struct pci_endpoint_test_xfer_param param;
 501	bool ret = false;
 502	u32 flags = 0;
 503	bool use_dma;
 504	u32 reg;
 505	void *addr;
 506	dma_addr_t phys_addr;
 507	struct pci_dev *pdev = test->pdev;
 508	struct device *dev = &pdev->dev;
 509	void *orig_addr;
 510	dma_addr_t orig_phys_addr;
 511	size_t offset;
 512	size_t alignment = test->alignment;
 513	int irq_type = test->irq_type;
 514	size_t size;
 515	u32 crc32;
 516	int err;
 517
 518	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
 519	if (err != 0) {
 520		dev_err(dev, "Failed to get transfer param\n");
 521		return false;
 522	}
 523
 524	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
 525	if (err)
 526		return false;
 527
 528	size = param.size;
 529
 530	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
 531	if (use_dma)
 532		flags |= FLAG_USE_DMA;
 533
 534	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
 535		dev_err(dev, "Invalid IRQ type option\n");
 536		goto err;
 537	}
 538
 539	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
 540	if (!orig_addr) {
 541		dev_err(dev, "Failed to allocate address\n");
 542		ret = false;
 543		goto err;
 544	}
 545
 546	get_random_bytes(orig_addr, size + alignment);
 547
 548	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
 549					DMA_TO_DEVICE);
 550	if (dma_mapping_error(dev, orig_phys_addr)) {
 551		dev_err(dev, "failed to map source buffer address\n");
 552		ret = false;
 553		goto err_phys_addr;
 554	}
 555
 556	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
 557		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
 558		offset = phys_addr - orig_phys_addr;
 559		addr = orig_addr + offset;
 560	} else {
 561		phys_addr = orig_phys_addr;
 562		addr = orig_addr;
 563	}
 564
 565	crc32 = crc32_le(~0, addr, size);
 566	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
 567				 crc32);
 568
 569	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
 570				 lower_32_bits(phys_addr));
 571	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
 572				 upper_32_bits(phys_addr));
 573
 574	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
 575
 576	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
 577	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
 578	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
 579	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 580				 COMMAND_READ);
 581
 582	wait_for_completion(&test->irq_raised);
 583
 584	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
 585	if (reg & STATUS_READ_SUCCESS)
 586		ret = true;
 587
 588	dma_unmap_single(dev, orig_phys_addr, size + alignment,
 589			 DMA_TO_DEVICE);
 590
 591err_phys_addr:
 592	kfree(orig_addr);
 593
 594err:
 595	return ret;
 596}
 597
 598static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
 599				   unsigned long arg)
 600{
 601	struct pci_endpoint_test_xfer_param param;
 602	bool ret = false;
 603	u32 flags = 0;
 604	bool use_dma;
 605	size_t size;
 606	void *addr;
 607	dma_addr_t phys_addr;
 608	struct pci_dev *pdev = test->pdev;
 609	struct device *dev = &pdev->dev;
 610	void *orig_addr;
 611	dma_addr_t orig_phys_addr;
 612	size_t offset;
 613	size_t alignment = test->alignment;
 614	int irq_type = test->irq_type;
 615	u32 crc32;
 616	int err;
 617
 618	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
 619	if (err) {
 620		dev_err(dev, "Failed to get transfer param\n");
 621		return false;
 622	}
 623
 624	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
 625	if (err)
 626		return false;
 627
 628	size = param.size;
 629
 630	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
 631	if (use_dma)
 632		flags |= FLAG_USE_DMA;
 633
 634	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
 635		dev_err(dev, "Invalid IRQ type option\n");
 636		goto err;
 637	}
 638
 639	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
 640	if (!orig_addr) {
 641		dev_err(dev, "Failed to allocate destination address\n");
 642		ret = false;
 643		goto err;
 644	}
 645
 646	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
 647					DMA_FROM_DEVICE);
 648	if (dma_mapping_error(dev, orig_phys_addr)) {
 649		dev_err(dev, "failed to map source buffer address\n");
 650		ret = false;
 651		goto err_phys_addr;
 652	}
 653
 654	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
 655		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
 656		offset = phys_addr - orig_phys_addr;
 657		addr = orig_addr + offset;
 658	} else {
 659		phys_addr = orig_phys_addr;
 660		addr = orig_addr;
 661	}
 662
 663	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
 664				 lower_32_bits(phys_addr));
 665	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
 666				 upper_32_bits(phys_addr));
 667
 668	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
 669
 670	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
 671	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
 672	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
 673	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 674				 COMMAND_WRITE);
 675
 676	wait_for_completion(&test->irq_raised);
 677
 678	dma_unmap_single(dev, orig_phys_addr, size + alignment,
 679			 DMA_FROM_DEVICE);
 680
 681	crc32 = crc32_le(~0, addr, size);
 682	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
 683		ret = true;
 684
 685err_phys_addr:
 686	kfree(orig_addr);
 687err:
 688	return ret;
 689}
 690
 691static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
 692{
 693	pci_endpoint_test_release_irq(test);
 694	pci_endpoint_test_free_irq_vectors(test);
 695	return true;
 696}
 697
 698static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
 699				      int req_irq_type)
 700{
 701	struct pci_dev *pdev = test->pdev;
 702	struct device *dev = &pdev->dev;
 703
 704	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
 705		dev_err(dev, "Invalid IRQ type option\n");
 706		return false;
 707	}
 708
 709	if (test->irq_type == req_irq_type)
 710		return true;
 711
 712	pci_endpoint_test_release_irq(test);
 713	pci_endpoint_test_free_irq_vectors(test);
 714
 715	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
 716		goto err;
 717
 718	if (!pci_endpoint_test_request_irq(test))
 719		goto err;
 720
 721	return true;
 722
 723err:
 724	pci_endpoint_test_free_irq_vectors(test);
 725	return false;
 726}
 727
 728static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
 729				    unsigned long arg)
 730{
 731	int ret = -EINVAL;
 732	enum pci_barno bar;
 733	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
 734	struct pci_dev *pdev = test->pdev;
 735
 736	mutex_lock(&test->mutex);
 737
 738	reinit_completion(&test->irq_raised);
 739	test->last_irq = -ENODATA;
 740
 741	switch (cmd) {
 742	case PCITEST_BAR:
 743		bar = arg;
 744		if (bar > BAR_5)
 745			goto ret;
 746		if (is_am654_pci_dev(pdev) && bar == BAR_0)
 747			goto ret;
 748		ret = pci_endpoint_test_bar(test, bar);
 749		break;
 750	case PCITEST_INTX_IRQ:
 751		ret = pci_endpoint_test_intx_irq(test);
 752		break;
 753	case PCITEST_MSI:
 754	case PCITEST_MSIX:
 755		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
 756		break;
 757	case PCITEST_WRITE:
 758		ret = pci_endpoint_test_write(test, arg);
 759		break;
 760	case PCITEST_READ:
 761		ret = pci_endpoint_test_read(test, arg);
 762		break;
 763	case PCITEST_COPY:
 764		ret = pci_endpoint_test_copy(test, arg);
 765		break;
 766	case PCITEST_SET_IRQTYPE:
 767		ret = pci_endpoint_test_set_irq(test, arg);
 768		break;
 769	case PCITEST_GET_IRQTYPE:
 770		ret = irq_type;
 771		break;
 772	case PCITEST_CLEAR_IRQ:
 773		ret = pci_endpoint_test_clear_irq(test);
 774		break;
 775	}
 776
 777ret:
 778	mutex_unlock(&test->mutex);
 779	return ret;
 780}
 781
 782static const struct file_operations pci_endpoint_test_fops = {
 783	.owner = THIS_MODULE,
 784	.unlocked_ioctl = pci_endpoint_test_ioctl,
 785};
 786
 787static int pci_endpoint_test_probe(struct pci_dev *pdev,
 788				   const struct pci_device_id *ent)
 789{
 790	int err;
 791	int id;
 792	char name[24];
 793	enum pci_barno bar;
 794	void __iomem *base;
 795	struct device *dev = &pdev->dev;
 796	struct pci_endpoint_test *test;
 797	struct pci_endpoint_test_data *data;
 798	enum pci_barno test_reg_bar = BAR_0;
 799	struct miscdevice *misc_device;
 800
 801	if (pci_is_bridge(pdev))
 802		return -ENODEV;
 803
 804	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
 805	if (!test)
 806		return -ENOMEM;
 807
 808	test->test_reg_bar = 0;
 809	test->alignment = 0;
 810	test->pdev = pdev;
 811	test->irq_type = IRQ_TYPE_UNDEFINED;
 812
 813	if (no_msi)
 814		irq_type = IRQ_TYPE_INTX;
 815
 816	data = (struct pci_endpoint_test_data *)ent->driver_data;
 817	if (data) {
 818		test_reg_bar = data->test_reg_bar;
 819		test->test_reg_bar = test_reg_bar;
 820		test->alignment = data->alignment;
 821		irq_type = data->irq_type;
 822	}
 823
 824	init_completion(&test->irq_raised);
 825	mutex_init(&test->mutex);
 826
 827	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
 828	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 829		dev_err(dev, "Cannot set DMA mask\n");
 830		return -EINVAL;
 831	}
 832
 833	err = pci_enable_device(pdev);
 834	if (err) {
 835		dev_err(dev, "Cannot enable PCI device\n");
 836		return err;
 837	}
 838
 839	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 840	if (err) {
 841		dev_err(dev, "Cannot obtain PCI resources\n");
 842		goto err_disable_pdev;
 843	}
 844
 845	pci_set_master(pdev);
 846
 847	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
 848		err = -EINVAL;
 849		goto err_disable_irq;
 850	}
 851
 852	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 853		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
 854			base = pci_ioremap_bar(pdev, bar);
 855			if (!base) {
 856				dev_err(dev, "Failed to read BAR%d\n", bar);
 857				WARN_ON(bar == test_reg_bar);
 858			}
 859			test->bar[bar] = base;
 860		}
 861	}
 862
 863	test->base = test->bar[test_reg_bar];
 864	if (!test->base) {
 865		err = -ENOMEM;
 866		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
 867			test_reg_bar);
 868		goto err_iounmap;
 869	}
 870
 871	pci_set_drvdata(pdev, test);
 872
 873	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
 874	if (id < 0) {
 875		err = id;
 876		dev_err(dev, "Unable to get id\n");
 877		goto err_iounmap;
 878	}
 879
 880	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
 881	test->name = kstrdup(name, GFP_KERNEL);
 882	if (!test->name) {
 883		err = -ENOMEM;
 884		goto err_ida_remove;
 885	}
 886
 887	if (!pci_endpoint_test_request_irq(test)) {
 888		err = -EINVAL;
 889		goto err_kfree_test_name;
 890	}
 891
 892	misc_device = &test->miscdev;
 893	misc_device->minor = MISC_DYNAMIC_MINOR;
 894	misc_device->name = kstrdup(name, GFP_KERNEL);
 895	if (!misc_device->name) {
 896		err = -ENOMEM;
 897		goto err_release_irq;
 898	}
 899	misc_device->parent = &pdev->dev;
 900	misc_device->fops = &pci_endpoint_test_fops;
 901
 902	err = misc_register(misc_device);
 903	if (err) {
 904		dev_err(dev, "Failed to register device\n");
 905		goto err_kfree_name;
 906	}
 907
 908	return 0;
 909
 910err_kfree_name:
 911	kfree(misc_device->name);
 912
 913err_release_irq:
 914	pci_endpoint_test_release_irq(test);
 915
 916err_kfree_test_name:
 917	kfree(test->name);
 918
 919err_ida_remove:
 920	ida_free(&pci_endpoint_test_ida, id);
 921
 922err_iounmap:
 923	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 924		if (test->bar[bar])
 925			pci_iounmap(pdev, test->bar[bar]);
 926	}
 927
 928err_disable_irq:
 929	pci_endpoint_test_free_irq_vectors(test);
 930	pci_release_regions(pdev);
 931
 932err_disable_pdev:
 933	pci_disable_device(pdev);
 934
 935	return err;
 936}
 937
 938static void pci_endpoint_test_remove(struct pci_dev *pdev)
 939{
 940	int id;
 941	enum pci_barno bar;
 942	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
 943	struct miscdevice *misc_device = &test->miscdev;
 944
 945	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
 946		return;
 947	if (id < 0)
 948		return;
 949
 950	pci_endpoint_test_release_irq(test);
 951	pci_endpoint_test_free_irq_vectors(test);
 952
 953	misc_deregister(&test->miscdev);
 954	kfree(misc_device->name);
 955	kfree(test->name);
 956	ida_free(&pci_endpoint_test_ida, id);
 957	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 958		if (test->bar[bar])
 959			pci_iounmap(pdev, test->bar[bar]);
 960	}
 961
 962	pci_release_regions(pdev);
 963	pci_disable_device(pdev);
 964}
 965
 966static const struct pci_endpoint_test_data default_data = {
 967	.test_reg_bar = BAR_0,
 968	.alignment = SZ_4K,
 969	.irq_type = IRQ_TYPE_MSI,
 970};
 971
 972static const struct pci_endpoint_test_data am654_data = {
 973	.test_reg_bar = BAR_2,
 974	.alignment = SZ_64K,
 975	.irq_type = IRQ_TYPE_MSI,
 976};
 977
 978static const struct pci_endpoint_test_data j721e_data = {
 979	.alignment = 256,
 980	.irq_type = IRQ_TYPE_MSI,
 981};
 982
 983static const struct pci_device_id pci_endpoint_test_tbl[] = {
 984	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
 985	  .driver_data = (kernel_ulong_t)&default_data,
 986	},
 987	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
 988	  .driver_data = (kernel_ulong_t)&default_data,
 989	},
 990	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
 991	  .driver_data = (kernel_ulong_t)&default_data,
 992	},
 993	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
 994	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
 995	  .driver_data = (kernel_ulong_t)&default_data,
 996	},
 997	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
 998	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
 999	  .driver_data = (kernel_ulong_t)&am654_data
1000	},
1001	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
1002	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
1003	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
1004	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
1005	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
1006	  .driver_data = (kernel_ulong_t)&default_data,
1007	},
1008	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
1009	  .driver_data = (kernel_ulong_t)&j721e_data,
1010	},
1011	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
1012	  .driver_data = (kernel_ulong_t)&j721e_data,
1013	},
1014	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
1015	  .driver_data = (kernel_ulong_t)&j721e_data,
1016	},
1017	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
1018	  .driver_data = (kernel_ulong_t)&j721e_data,
1019	},
1020	{ }
1021};
1022MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1023
1024static struct pci_driver pci_endpoint_test_driver = {
1025	.name		= DRV_MODULE_NAME,
1026	.id_table	= pci_endpoint_test_tbl,
1027	.probe		= pci_endpoint_test_probe,
1028	.remove		= pci_endpoint_test_remove,
1029	.sriov_configure = pci_sriov_configure_simple,
1030};
1031module_pci_driver(pci_endpoint_test_driver);
1032
1033MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
1034MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1035MODULE_LICENSE("GPL v2");
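
The register offsets, COMMAND_* bits and IRQ_TYPE_* values above are driven from user space through the ioctls dispatched in pci_endpoint_test_ioctl(). A minimal user-space sketch of the data-transfer path follows; it is not part of the kernel tree and assumes the misc device node /dev/pci-endpoint-test.0 created by this driver (the ".0" suffix comes from the IDA) and the UAPI definitions exported in <linux/pcitest.h> (struct pci_endpoint_test_xfer_param, PCITEST_SET_IRQTYPE, PCITEST_WRITE, PCITEST_FLAGS_USE_DMA).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pcitest.h>

int main(void)
{
	struct pci_endpoint_test_xfer_param param;
	int fd, ret;

	/* Assumed device node; the instance number is assigned by ida_alloc() in probe. */
	fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Select MSI; the argument matches IRQ_TYPE_MSI (1) in the driver. */
	ret = ioctl(fd, PCITEST_SET_IRQTYPE, 1);
	printf("SET_IRQTYPE:\t%s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	/* 1 MiB host-to-endpoint transfer; ask the endpoint to use its DMA engine. */
	memset(&param, 0, sizeof(param));
	param.size = 1024 * 1024;
	param.flags = PCITEST_FLAGS_USE_DMA;
	ret = ioctl(fd, PCITEST_WRITE, &param);
	printf("WRITE (1 MiB):\t%s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	close(fd);
	return 0;
}
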
drivers/misc/pci_endpoint_test.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/**
  3 * Host side test driver to test endpoint functionality
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/crc32.h>
 10#include <linux/delay.h>
 11#include <linux/fs.h>
 12#include <linux/io.h>
 13#include <linux/interrupt.h>
 14#include <linux/irq.h>
 15#include <linux/miscdevice.h>
 16#include <linux/module.h>
 17#include <linux/mutex.h>
 18#include <linux/random.h>
 19#include <linux/slab.h>
 20#include <linux/uaccess.h>
 21#include <linux/pci.h>
 22#include <linux/pci_ids.h>
 23
 24#include <linux/pci_regs.h>
 25
 26#include <uapi/linux/pcitest.h>
 27
 28#define DRV_MODULE_NAME				"pci-endpoint-test"
 29
 30#define IRQ_TYPE_UNDEFINED			-1
 31#define IRQ_TYPE_LEGACY				0
 32#define IRQ_TYPE_MSI				1
 33#define IRQ_TYPE_MSIX				2
 34
 35#define PCI_ENDPOINT_TEST_MAGIC			0x0
 36
 37#define PCI_ENDPOINT_TEST_COMMAND		0x4
 38#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
 39#define COMMAND_RAISE_MSI_IRQ			BIT(1)
 40#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
 41#define COMMAND_READ				BIT(3)
 42#define COMMAND_WRITE				BIT(4)
 43#define COMMAND_COPY				BIT(5)
 44
 45#define PCI_ENDPOINT_TEST_STATUS		0x8
 46#define STATUS_READ_SUCCESS			BIT(0)
 47#define STATUS_READ_FAIL			BIT(1)
 48#define STATUS_WRITE_SUCCESS			BIT(2)
 49#define STATUS_WRITE_FAIL			BIT(3)
 50#define STATUS_COPY_SUCCESS			BIT(4)
 51#define STATUS_COPY_FAIL			BIT(5)
 52#define STATUS_IRQ_RAISED			BIT(6)
 53#define STATUS_SRC_ADDR_INVALID			BIT(7)
 54#define STATUS_DST_ADDR_INVALID			BIT(8)
 55
 56#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
 57#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
 58
 59#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
 60#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
 61
 62#define PCI_ENDPOINT_TEST_SIZE			0x1c
 63#define PCI_ENDPOINT_TEST_CHECKSUM		0x20
 64
 65#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
 66#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28
 67
 68#define PCI_ENDPOINT_TEST_FLAGS			0x2c
 69#define FLAG_USE_DMA				BIT(0)
 70
 71#define PCI_DEVICE_ID_TI_J721E			0xb00d
 72#define PCI_DEVICE_ID_TI_AM654			0xb00c
 73
 74#define is_am654_pci_dev(pdev)		\
 75		((pdev)->device == PCI_DEVICE_ID_TI_AM654)
 76
 77#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
 78
 79static DEFINE_IDA(pci_endpoint_test_ida);
 80
 81#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
 82					    miscdev)
 83
 84static bool no_msi;
 85module_param(no_msi, bool, 0444);
 86MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
 87
 88static int irq_type = IRQ_TYPE_MSI;
 89module_param(irq_type, int, 0444);
 90MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
 91
 92enum pci_barno {
 93	BAR_0,
 94	BAR_1,
 95	BAR_2,
 96	BAR_3,
 97	BAR_4,
 98	BAR_5,
 99};
100
101struct pci_endpoint_test {
102	struct pci_dev	*pdev;
103	void __iomem	*base;
104	void __iomem	*bar[PCI_STD_NUM_BARS];
105	struct completion irq_raised;
106	int		last_irq;
107	int		num_irqs;
108	int		irq_type;
109	/* mutex to protect the ioctls */
110	struct mutex	mutex;
111	struct miscdevice miscdev;
112	enum pci_barno test_reg_bar;
113	size_t alignment;
114	const char *name;
115};
116
117struct pci_endpoint_test_data {
118	enum pci_barno test_reg_bar;
119	size_t alignment;
120	int irq_type;
121};
122
123static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
124					  u32 offset)
125{
126	return readl(test->base + offset);
127}
128
129static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
130					    u32 offset, u32 value)
131{
132	writel(value, test->base + offset);
133}
134
135static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
136					      int bar, int offset)
137{
138	return readl(test->bar[bar] + offset);
139}
140
141static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
142						int bar, u32 offset, u32 value)
143{
144	writel(value, test->bar[bar] + offset);
145}
146
147static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
148{
149	struct pci_endpoint_test *test = dev_id;
150	u32 reg;
151
152	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
153	if (reg & STATUS_IRQ_RAISED) {
154		test->last_irq = irq;
155		complete(&test->irq_raised);
156		reg &= ~STATUS_IRQ_RAISED;
157	}
158	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
159				 reg);
160
161	return IRQ_HANDLED;
162}
163
164static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
165{
166	struct pci_dev *pdev = test->pdev;
167
168	pci_free_irq_vectors(pdev);
169	test->irq_type = IRQ_TYPE_UNDEFINED;
170}
171
172static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
173						int type)
174{
175	int irq = -1;
176	struct pci_dev *pdev = test->pdev;
177	struct device *dev = &pdev->dev;
178	bool res = true;
179
180	switch (type) {
181	case IRQ_TYPE_LEGACY:
182		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
183		if (irq < 0)
184			dev_err(dev, "Failed to get Legacy interrupt\n");
185		break;
186	case IRQ_TYPE_MSI:
187		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
188		if (irq < 0)
189			dev_err(dev, "Failed to get MSI interrupts\n");
190		break;
191	case IRQ_TYPE_MSIX:
192		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
193		if (irq < 0)
194			dev_err(dev, "Failed to get MSI-X interrupts\n");
195		break;
196	default:
197		dev_err(dev, "Invalid IRQ type selected\n");
198	}
199
200	if (irq < 0) {
201		irq = 0;
202		res = false;
203	}
204
205	test->irq_type = type;
206	test->num_irqs = irq;
207
208	return res;
209}
210
211static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
212{
213	int i;
214	struct pci_dev *pdev = test->pdev;
215	struct device *dev = &pdev->dev;
216
217	for (i = 0; i < test->num_irqs; i++)
218		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
219
220	test->num_irqs = 0;
221}
222
223static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
224{
225	int i;
226	int err;
227	struct pci_dev *pdev = test->pdev;
228	struct device *dev = &pdev->dev;
229
230	for (i = 0; i < test->num_irqs; i++) {
231		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
232				       pci_endpoint_test_irqhandler,
233				       IRQF_SHARED, test->name, test);
234		if (err)
235			goto fail;
236	}
237
238	return true;
239
240fail:
241	switch (irq_type) {
242	case IRQ_TYPE_LEGACY:
243		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
244			pci_irq_vector(pdev, i));
245		break;
246	case IRQ_TYPE_MSI:
247		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
248			pci_irq_vector(pdev, i),
249			i + 1);
250		break;
251	case IRQ_TYPE_MSIX:
252		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
253			pci_irq_vector(pdev, i),
254			i + 1);
255		break;
256	}
257
258	return false;
259}
260
261static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
262				  enum pci_barno barno)
263{
264	int j;
265	u32 val;
266	int size;
267	struct pci_dev *pdev = test->pdev;
268
269	if (!test->bar[barno])
270		return false;
271
272	size = pci_resource_len(pdev, barno);
273
274	if (barno == test->test_reg_bar)
275		size = 0x4;
276
277	for (j = 0; j < size; j += 4)
278		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
279
280	for (j = 0; j < size; j += 4) {
281		val = pci_endpoint_test_bar_readl(test, barno, j);
282		if (val != 0xA0A0A0A0)
283			return false;
284	}
285
286	return true;
287}
288
289static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
290{
291	u32 val;
292
293	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
294				 IRQ_TYPE_LEGACY);
295	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
296	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
297				 COMMAND_RAISE_LEGACY_IRQ);
298	val = wait_for_completion_timeout(&test->irq_raised,
299					  msecs_to_jiffies(1000));
300	if (!val)
301		return false;
302
303	return true;
304}
305
306static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
307				       u16 msi_num, bool msix)
308{
309	u32 val;
310	struct pci_dev *pdev = test->pdev;
311
312	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
313				 msix == false ? IRQ_TYPE_MSI :
314				 IRQ_TYPE_MSIX);
315	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
316	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
317				 msix == false ? COMMAND_RAISE_MSI_IRQ :
318				 COMMAND_RAISE_MSIX_IRQ);
319	val = wait_for_completion_timeout(&test->irq_raised,
320					  msecs_to_jiffies(1000));
321	if (!val)
322		return false;
323
324	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
325		return true;
326
327	return false;
328}
329
330static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
331				   unsigned long arg)
332{
333	struct pci_endpoint_test_xfer_param param;
334	bool ret = false;
335	void *src_addr;
336	void *dst_addr;
337	u32 flags = 0;
338	bool use_dma;
339	size_t size;
340	dma_addr_t src_phys_addr;
341	dma_addr_t dst_phys_addr;
342	struct pci_dev *pdev = test->pdev;
343	struct device *dev = &pdev->dev;
344	void *orig_src_addr;
345	dma_addr_t orig_src_phys_addr;
346	void *orig_dst_addr;
347	dma_addr_t orig_dst_phys_addr;
348	size_t offset;
349	size_t alignment = test->alignment;
350	int irq_type = test->irq_type;
351	u32 src_crc32;
352	u32 dst_crc32;
353	int err;
354
355	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
356	if (err) {
357		dev_err(dev, "Failed to get transfer param\n");
358		return false;
359	}
360
361	size = param.size;
362	if (size > SIZE_MAX - alignment)
363		goto err;
364
365	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
366	if (use_dma)
367		flags |= FLAG_USE_DMA;
368
369	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
370		dev_err(dev, "Invalid IRQ type option\n");
371		goto err;
372	}
373
374	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
375	if (!orig_src_addr) {
376		dev_err(dev, "Failed to allocate source buffer\n");
377		ret = false;
378		goto err;
379	}
380
381	get_random_bytes(orig_src_addr, size + alignment);
382	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
383					    size + alignment, DMA_TO_DEVICE);
384	if (dma_mapping_error(dev, orig_src_phys_addr)) {
385		dev_err(dev, "failed to map source buffer address\n");
386		ret = false;
387		goto err_src_phys_addr;
388	}
389
390	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
391		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
392		offset = src_phys_addr - orig_src_phys_addr;
393		src_addr = orig_src_addr + offset;
394	} else {
395		src_phys_addr = orig_src_phys_addr;
396		src_addr = orig_src_addr;
397	}
398
399	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
400				 lower_32_bits(src_phys_addr));
401
402	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
403				 upper_32_bits(src_phys_addr));
404
405	src_crc32 = crc32_le(~0, src_addr, size);
406
407	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
408	if (!orig_dst_addr) {
409		dev_err(dev, "Failed to allocate destination address\n");
410		ret = false;
411		goto err_dst_addr;
412	}
413
414	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
415					    size + alignment, DMA_FROM_DEVICE);
416	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
417		dev_err(dev, "failed to map destination buffer address\n");
418		ret = false;
419		goto err_dst_phys_addr;
420	}
421
422	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
423		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
424		offset = dst_phys_addr - orig_dst_phys_addr;
425		dst_addr = orig_dst_addr + offset;
426	} else {
427		dst_phys_addr = orig_dst_phys_addr;
428		dst_addr = orig_dst_addr;
429	}
430
431	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
432				 lower_32_bits(dst_phys_addr));
433	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
434				 upper_32_bits(dst_phys_addr));
435
436	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
437				 size);
438
439	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
440	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
441	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
442	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
443				 COMMAND_COPY);
444
445	wait_for_completion(&test->irq_raised);
446
447	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
448			 DMA_FROM_DEVICE);
449
450	dst_crc32 = crc32_le(~0, dst_addr, size);
451	if (dst_crc32 == src_crc32)
452		ret = true;
453
454err_dst_phys_addr:
455	kfree(orig_dst_addr);
456
457err_dst_addr:
458	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
459			 DMA_TO_DEVICE);
460
461err_src_phys_addr:
462	kfree(orig_src_addr);
463
464err:
465	return ret;
466}
467
468static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
469				    unsigned long arg)
470{
471	struct pci_endpoint_test_xfer_param param;
472	bool ret = false;
473	u32 flags = 0;
474	bool use_dma;
475	u32 reg;
476	void *addr;
477	dma_addr_t phys_addr;
478	struct pci_dev *pdev = test->pdev;
479	struct device *dev = &pdev->dev;
480	void *orig_addr;
481	dma_addr_t orig_phys_addr;
482	size_t offset;
483	size_t alignment = test->alignment;
484	int irq_type = test->irq_type;
485	size_t size;
486	u32 crc32;
487	int err;
488
489	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
490	if (err != 0) {
491		dev_err(dev, "Failed to get transfer param\n");
492		return false;
493	}
494
495	size = param.size;
496	if (size > SIZE_MAX - alignment)
497		goto err;
498
499	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
500	if (use_dma)
501		flags |= FLAG_USE_DMA;
502
503	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
504		dev_err(dev, "Invalid IRQ type option\n");
505		goto err;
506	}
507
508	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
509	if (!orig_addr) {
510		dev_err(dev, "Failed to allocate address\n");
511		ret = false;
512		goto err;
513	}
514
515	get_random_bytes(orig_addr, size + alignment);
516
517	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
518					DMA_TO_DEVICE);
519	if (dma_mapping_error(dev, orig_phys_addr)) {
520		dev_err(dev, "failed to map source buffer address\n");
521		ret = false;
522		goto err_phys_addr;
523	}
524
525	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
526		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
527		offset = phys_addr - orig_phys_addr;
528		addr = orig_addr + offset;
529	} else {
530		phys_addr = orig_phys_addr;
531		addr = orig_addr;
532	}
533
534	crc32 = crc32_le(~0, addr, size);
535	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
536				 crc32);
537
538	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
539				 lower_32_bits(phys_addr));
540	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
541				 upper_32_bits(phys_addr));
542
543	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
544
545	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
546	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
547	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
548	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
549				 COMMAND_READ);
550
551	wait_for_completion(&test->irq_raised);
552
553	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
554	if (reg & STATUS_READ_SUCCESS)
555		ret = true;
556
557	dma_unmap_single(dev, orig_phys_addr, size + alignment,
558			 DMA_TO_DEVICE);
559
560err_phys_addr:
561	kfree(orig_addr);
562
563err:
564	return ret;
565}
566
567static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
568				   unsigned long arg)
569{
570	struct pci_endpoint_test_xfer_param param;
571	bool ret = false;
572	u32 flags = 0;
573	bool use_dma;
574	size_t size;
575	void *addr;
576	dma_addr_t phys_addr;
577	struct pci_dev *pdev = test->pdev;
578	struct device *dev = &pdev->dev;
579	void *orig_addr;
580	dma_addr_t orig_phys_addr;
581	size_t offset;
582	size_t alignment = test->alignment;
583	int irq_type = test->irq_type;
584	u32 crc32;
585	int err;
586
587	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
588	if (err) {
589		dev_err(dev, "Failed to get transfer param\n");
590		return false;
591	}
592
593	size = param.size;
594	if (size > SIZE_MAX - alignment)
595		goto err;
596
597	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
598	if (use_dma)
599		flags |= FLAG_USE_DMA;
600
601	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
602		dev_err(dev, "Invalid IRQ type option\n");
603		goto err;
604	}
605
606	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
607	if (!orig_addr) {
608		dev_err(dev, "Failed to allocate destination address\n");
609		ret = false;
610		goto err;
611	}
612
613	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
614					DMA_FROM_DEVICE);
615	if (dma_mapping_error(dev, orig_phys_addr)) {
616		dev_err(dev, "failed to map source buffer address\n");
617		ret = false;
618		goto err_phys_addr;
619	}
620
621	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
622		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
623		offset = phys_addr - orig_phys_addr;
624		addr = orig_addr + offset;
625	} else {
626		phys_addr = orig_phys_addr;
627		addr = orig_addr;
628	}
629
630	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
631				 lower_32_bits(phys_addr));
632	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
633				 upper_32_bits(phys_addr));
634
635	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
636
637	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
638	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
639	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
640	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
641				 COMMAND_WRITE);
642
643	wait_for_completion(&test->irq_raised);
644
645	dma_unmap_single(dev, orig_phys_addr, size + alignment,
646			 DMA_FROM_DEVICE);
647
648	crc32 = crc32_le(~0, addr, size);
649	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
650		ret = true;
651
652err_phys_addr:
653	kfree(orig_addr);
654err:
655	return ret;
656}
657
658static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
659{
660	pci_endpoint_test_release_irq(test);
661	pci_endpoint_test_free_irq_vectors(test);
662	return true;
663}
664
665static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
666				      int req_irq_type)
667{
668	struct pci_dev *pdev = test->pdev;
669	struct device *dev = &pdev->dev;
670
671	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
672		dev_err(dev, "Invalid IRQ type option\n");
673		return false;
674	}
675
676	if (test->irq_type == req_irq_type)
677		return true;
678
679	pci_endpoint_test_release_irq(test);
680	pci_endpoint_test_free_irq_vectors(test);
681
682	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
683		goto err;
684
685	if (!pci_endpoint_test_request_irq(test))
686		goto err;
687
688	return true;
689
690err:
691	pci_endpoint_test_free_irq_vectors(test);
692	return false;
693}
694
695static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
696				    unsigned long arg)
697{
698	int ret = -EINVAL;
699	enum pci_barno bar;
700	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
701	struct pci_dev *pdev = test->pdev;
702
703	mutex_lock(&test->mutex);
704	switch (cmd) {
705	case PCITEST_BAR:
706		bar = arg;
707		if (bar < 0 || bar > 5)
708			goto ret;
709		if (is_am654_pci_dev(pdev) && bar == BAR_0)
710			goto ret;
711		ret = pci_endpoint_test_bar(test, bar);
712		break;
713	case PCITEST_LEGACY_IRQ:
714		ret = pci_endpoint_test_legacy_irq(test);
715		break;
716	case PCITEST_MSI:
717	case PCITEST_MSIX:
718		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
719		break;
720	case PCITEST_WRITE:
721		ret = pci_endpoint_test_write(test, arg);
722		break;
723	case PCITEST_READ:
724		ret = pci_endpoint_test_read(test, arg);
725		break;
726	case PCITEST_COPY:
727		ret = pci_endpoint_test_copy(test, arg);
728		break;
729	case PCITEST_SET_IRQTYPE:
730		ret = pci_endpoint_test_set_irq(test, arg);
731		break;
732	case PCITEST_GET_IRQTYPE:
733		ret = irq_type;
734		break;
735	case PCITEST_CLEAR_IRQ:
736		ret = pci_endpoint_test_clear_irq(test);
737		break;
738	}
739
740ret:
741	mutex_unlock(&test->mutex);
742	return ret;
743}
744
745static const struct file_operations pci_endpoint_test_fops = {
746	.owner = THIS_MODULE,
747	.unlocked_ioctl = pci_endpoint_test_ioctl,
748};
749
750static int pci_endpoint_test_probe(struct pci_dev *pdev,
751				   const struct pci_device_id *ent)
752{
753	int err;
754	int id;
755	char name[24];
756	enum pci_barno bar;
757	void __iomem *base;
758	struct device *dev = &pdev->dev;
759	struct pci_endpoint_test *test;
760	struct pci_endpoint_test_data *data;
761	enum pci_barno test_reg_bar = BAR_0;
762	struct miscdevice *misc_device;
763
764	if (pci_is_bridge(pdev))
765		return -ENODEV;
766
767	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
768	if (!test)
769		return -ENOMEM;
770
771	test->test_reg_bar = 0;
772	test->alignment = 0;
773	test->pdev = pdev;
774	test->irq_type = IRQ_TYPE_UNDEFINED;
775
776	if (no_msi)
777		irq_type = IRQ_TYPE_LEGACY;
778
779	data = (struct pci_endpoint_test_data *)ent->driver_data;
780	if (data) {
781		test_reg_bar = data->test_reg_bar;
782		test->test_reg_bar = test_reg_bar;
783		test->alignment = data->alignment;
784		irq_type = data->irq_type;
785	}
786
787	init_completion(&test->irq_raised);
788	mutex_init(&test->mutex);
789
790	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
791	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
792		dev_err(dev, "Cannot set DMA mask\n");
793		return -EINVAL;
794	}
795
796	err = pci_enable_device(pdev);
797	if (err) {
798		dev_err(dev, "Cannot enable PCI device\n");
799		return err;
800	}
801
802	err = pci_request_regions(pdev, DRV_MODULE_NAME);
803	if (err) {
804		dev_err(dev, "Cannot obtain PCI resources\n");
805		goto err_disable_pdev;
806	}
807
808	pci_set_master(pdev);
809
810	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
811		goto err_disable_irq;
812
813	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
814		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
815			base = pci_ioremap_bar(pdev, bar);
816			if (!base) {
817				dev_err(dev, "Failed to read BAR%d\n", bar);
818				WARN_ON(bar == test_reg_bar);
819			}
820			test->bar[bar] = base;
821		}
822	}
823
824	test->base = test->bar[test_reg_bar];
825	if (!test->base) {
826		err = -ENOMEM;
827		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
828			test_reg_bar);
829		goto err_iounmap;
830	}
831
832	pci_set_drvdata(pdev, test);
833
834	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
835	if (id < 0) {
836		err = id;
837		dev_err(dev, "Unable to get id\n");
838		goto err_iounmap;
839	}
840
841	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
842	test->name = kstrdup(name, GFP_KERNEL);
843	if (!test->name) {
844		err = -ENOMEM;
845		goto err_ida_remove;
846	}
847
848	if (!pci_endpoint_test_request_irq(test))
849		goto err_kfree_test_name;
850
851	misc_device = &test->miscdev;
852	misc_device->minor = MISC_DYNAMIC_MINOR;
853	misc_device->name = kstrdup(name, GFP_KERNEL);
854	if (!misc_device->name) {
855		err = -ENOMEM;
856		goto err_release_irq;
857	}
858	misc_device->fops = &pci_endpoint_test_fops,
859
860	err = misc_register(misc_device);
861	if (err) {
862		dev_err(dev, "Failed to register device\n");
863		goto err_kfree_name;
864	}
865
866	return 0;
867
868err_kfree_name:
869	kfree(misc_device->name);
870
871err_release_irq:
872	pci_endpoint_test_release_irq(test);
873
874err_kfree_test_name:
875	kfree(test->name);
876
877err_ida_remove:
878	ida_simple_remove(&pci_endpoint_test_ida, id);
879
880err_iounmap:
881	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
882		if (test->bar[bar])
883			pci_iounmap(pdev, test->bar[bar]);
884	}
885
886err_disable_irq:
887	pci_endpoint_test_free_irq_vectors(test);
888	pci_release_regions(pdev);
889
890err_disable_pdev:
891	pci_disable_device(pdev);
892
893	return err;
894}
895
896static void pci_endpoint_test_remove(struct pci_dev *pdev)
897{
898	int id;
899	enum pci_barno bar;
900	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
901	struct miscdevice *misc_device = &test->miscdev;
902
903	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
904		return;
905	if (id < 0)
906		return;
907
908	misc_deregister(&test->miscdev);
909	kfree(misc_device->name);
910	kfree(test->name);
911	ida_simple_remove(&pci_endpoint_test_ida, id);
912	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
913		if (test->bar[bar])
914			pci_iounmap(pdev, test->bar[bar]);
915	}
916
917	pci_endpoint_test_release_irq(test);
918	pci_endpoint_test_free_irq_vectors(test);
919
920	pci_release_regions(pdev);
921	pci_disable_device(pdev);
922}
923
924static const struct pci_endpoint_test_data default_data = {
925	.test_reg_bar = BAR_0,
926	.alignment = SZ_4K,
927	.irq_type = IRQ_TYPE_MSI,
928};
929
930static const struct pci_endpoint_test_data am654_data = {
931	.test_reg_bar = BAR_2,
932	.alignment = SZ_64K,
933	.irq_type = IRQ_TYPE_MSI,
934};
935
936static const struct pci_endpoint_test_data j721e_data = {
937	.alignment = 256,
938	.irq_type = IRQ_TYPE_MSI,
939};
940
941static const struct pci_device_id pci_endpoint_test_tbl[] = {
942	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
943	  .driver_data = (kernel_ulong_t)&default_data,
944	},
945	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
946	  .driver_data = (kernel_ulong_t)&default_data,
947	},
948	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
949	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
950	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
951	  .driver_data = (kernel_ulong_t)&am654_data
952	},
953	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
954	},
955	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
956	  .driver_data = (kernel_ulong_t)&j721e_data,
957	},
958	{ }
959};
960MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
961
962static struct pci_driver pci_endpoint_test_driver = {
963	.name		= DRV_MODULE_NAME,
964	.id_table	= pci_endpoint_test_tbl,
965	.probe		= pci_endpoint_test_probe,
966	.remove		= pci_endpoint_test_remove,
967};
968module_pci_driver(pci_endpoint_test_driver);
969
970MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
971MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
972MODULE_LICENSE("GPL v2");
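
Both versions of the driver expose the same BAR and interrupt test ioctls. As a second hedged sketch (again assuming /dev/pci-endpoint-test.0 and the <linux/pcitest.h> UAPI; PCITEST_BAR takes the BAR number directly, PCITEST_MSI the vector number starting at 1), the paths handled by pci_endpoint_test_bar() and pci_endpoint_test_msi_irq() can be exercised like this:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pcitest.h>

int main(void)
{
	int fd, bar, ret;

	/* Assumed device node; instance 0 is the first probed test device. */
	fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Walk BAR0..BAR5; the driver returns 1 when the test pattern reads back. */
	for (bar = 0; bar <= 5; bar++) {
		ret = ioctl(fd, PCITEST_BAR, bar);
		printf("BAR%d:\t%s\n", bar, ret == 1 ? "OKAY" : "NOT OKAY");
	}

	/* Ask the endpoint to raise MSI vector 1 and wait for it on the host. */
	ret = ioctl(fd, PCITEST_MSI, 1);
	printf("MSI1:\t%s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	close(fd);
	return 0;
}
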