Linux Audio

Check our new training course

Loading...
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
   8
   9#include <linux/crc32.h>
  10#include <linux/cleanup.h>
  11#include <linux/delay.h>
  12#include <linux/fs.h>
  13#include <linux/io.h>
  14#include <linux/interrupt.h>
  15#include <linux/irq.h>
  16#include <linux/miscdevice.h>
  17#include <linux/module.h>
  18#include <linux/mutex.h>
  19#include <linux/random.h>
  20#include <linux/slab.h>
  21#include <linux/uaccess.h>
  22#include <linux/pci.h>
  23#include <linux/pci_ids.h>
  24
  25#include <linux/pci_regs.h>
  26
  27#include <uapi/linux/pcitest.h>
  28
  29#define DRV_MODULE_NAME				"pci-endpoint-test"
 
 
  30
  31#define IRQ_TYPE_UNDEFINED			-1
  32#define IRQ_TYPE_INTX				0
  33#define IRQ_TYPE_MSI				1
  34#define IRQ_TYPE_MSIX				2
  35
  36#define PCI_ENDPOINT_TEST_MAGIC			0x0
  37
  38#define PCI_ENDPOINT_TEST_COMMAND		0x4
  39#define COMMAND_RAISE_INTX_IRQ			BIT(0)
  40#define COMMAND_RAISE_MSI_IRQ			BIT(1)
  41#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
  42#define COMMAND_READ				BIT(3)
  43#define COMMAND_WRITE				BIT(4)
  44#define COMMAND_COPY				BIT(5)
  45
  46#define PCI_ENDPOINT_TEST_STATUS		0x8
  47#define STATUS_READ_SUCCESS			BIT(0)
  48#define STATUS_READ_FAIL			BIT(1)
  49#define STATUS_WRITE_SUCCESS			BIT(2)
  50#define STATUS_WRITE_FAIL			BIT(3)
  51#define STATUS_COPY_SUCCESS			BIT(4)
  52#define STATUS_COPY_FAIL			BIT(5)
  53#define STATUS_IRQ_RAISED			BIT(6)
  54#define STATUS_SRC_ADDR_INVALID			BIT(7)
  55#define STATUS_DST_ADDR_INVALID			BIT(8)
  56
  57#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
  58#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
  59
  60#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
  61#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
  62
  63#define PCI_ENDPOINT_TEST_SIZE			0x1c
  64#define PCI_ENDPOINT_TEST_CHECKSUM		0x20
  65
  66#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
  67#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28
  68
  69#define PCI_ENDPOINT_TEST_FLAGS			0x2c
  70#define FLAG_USE_DMA				BIT(0)
  71
  72#define PCI_DEVICE_ID_TI_AM654			0xb00c
  73#define PCI_DEVICE_ID_TI_J7200			0xb00f
  74#define PCI_DEVICE_ID_TI_AM64			0xb010
  75#define PCI_DEVICE_ID_TI_J721S2		0xb013
  76#define PCI_DEVICE_ID_LS1088A			0x80c0
  77#define PCI_DEVICE_ID_IMX8			0x0808
  78
  79#define is_am654_pci_dev(pdev)		\
  80		((pdev)->device == PCI_DEVICE_ID_TI_AM654)
  81
  82#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
  83#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
  84#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
  85#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
  86#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031
  87
  88#define PCI_VENDOR_ID_ROCKCHIP			0x1d87
  89#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588
  90
  91static DEFINE_IDA(pci_endpoint_test_ida);
  92
  93#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
  94					    miscdev)
  95
  96static bool no_msi;
  97module_param(no_msi, bool, 0444);
  98MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
  99
 100static int irq_type = IRQ_TYPE_MSI;
 101module_param(irq_type, int, 0444);
 102MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
 103
 104enum pci_barno {
 105	BAR_0,
 106	BAR_1,
 107	BAR_2,
 108	BAR_3,
 109	BAR_4,
 110	BAR_5,
 111};
 112
 113struct pci_endpoint_test {
 114	struct pci_dev	*pdev;
 115	void __iomem	*base;
 116	void __iomem	*bar[PCI_STD_NUM_BARS];
 117	struct completion irq_raised;
 118	int		last_irq;
 119	int		num_irqs;
 120	int		irq_type;
 121	/* mutex to protect the ioctls */
 122	struct mutex	mutex;
 123	struct miscdevice miscdev;
 124	enum pci_barno test_reg_bar;
 125	size_t alignment;
 126	const char *name;
 127};
 128
 129struct pci_endpoint_test_data {
 130	enum pci_barno test_reg_bar;
 131	size_t alignment;
 132	int irq_type;
 133};
 134
/* Read a 32-bit test register at @offset in the endpoint's test-register BAR. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}
 140
/* Write a 32-bit test register at @offset in the endpoint's test-register BAR. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}
 146
 
 
 
 
 
 
 
 
 
 
 
 
 147static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
 148{
 149	struct pci_endpoint_test *test = dev_id;
 150	u32 reg;
 151
 152	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
 153	if (reg & STATUS_IRQ_RAISED) {
 154		test->last_irq = irq;
 155		complete(&test->irq_raised);
 
 156	}
 
 
 157
 158	return IRQ_HANDLED;
 159}
 160
 161static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
 162{
 163	struct pci_dev *pdev = test->pdev;
 164
 165	pci_free_irq_vectors(pdev);
 166	test->irq_type = IRQ_TYPE_UNDEFINED;
 167}
 168
 169static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
 170						int type)
 171{
 172	int irq = -1;
 173	struct pci_dev *pdev = test->pdev;
 174	struct device *dev = &pdev->dev;
 175	bool res = true;
 176
 177	switch (type) {
 178	case IRQ_TYPE_INTX:
 179		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
 180		if (irq < 0)
 181			dev_err(dev, "Failed to get Legacy interrupt\n");
 182		break;
 183	case IRQ_TYPE_MSI:
 184		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
 185		if (irq < 0)
 186			dev_err(dev, "Failed to get MSI interrupts\n");
 187		break;
 188	case IRQ_TYPE_MSIX:
 189		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
 190		if (irq < 0)
 191			dev_err(dev, "Failed to get MSI-X interrupts\n");
 192		break;
 193	default:
 194		dev_err(dev, "Invalid IRQ type selected\n");
 195	}
 196
 197	if (irq < 0) {
 198		irq = 0;
 199		res = false;
 200	}
 201
 202	test->irq_type = type;
 203	test->num_irqs = irq;
 204
 205	return res;
 206}
 207
 208static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
 209{
 210	int i;
 211	struct pci_dev *pdev = test->pdev;
 212	struct device *dev = &pdev->dev;
 213
 214	for (i = 0; i < test->num_irqs; i++)
 215		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
 216
 217	test->num_irqs = 0;
 218}
 219
 220static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
 221{
 222	int i;
 223	int err;
 224	struct pci_dev *pdev = test->pdev;
 225	struct device *dev = &pdev->dev;
 226
 227	for (i = 0; i < test->num_irqs; i++) {
 228		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
 229				       pci_endpoint_test_irqhandler,
 230				       IRQF_SHARED, test->name, test);
 231		if (err)
 232			goto fail;
 233	}
 234
 235	return true;
 236
 237fail:
 238	switch (irq_type) {
 239	case IRQ_TYPE_INTX:
 240		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
 241			pci_irq_vector(pdev, i));
 242		break;
 243	case IRQ_TYPE_MSI:
 244		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
 245			pci_irq_vector(pdev, i),
 246			i + 1);
 247		break;
 248	case IRQ_TYPE_MSIX:
 249		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
 250			pci_irq_vector(pdev, i),
 251			i + 1);
 252		break;
 253	}
 254
 255	return false;
 256}
 257
/* One distinct fill byte pattern per BAR (indexed by enum pci_barno). */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
 266
 267static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
 268					enum pci_barno barno, int offset,
 269					void *write_buf, void *read_buf,
 270					int size)
 271{
 272	memset(write_buf, bar_test_pattern[barno], size);
 273	memcpy_toio(test->bar[barno] + offset, write_buf, size);
 274
 275	memcpy_fromio(read_buf, test->bar[barno] + offset, size);
 276
 277	return memcmp(write_buf, read_buf, size);
 278}
 279
/*
 * Verify that @barno is readable/writable by writing a pattern across the
 * whole BAR and reading it back. Returns true when every chunk matches.
 * The __free(kfree) attributes free the scratch buffers automatically on
 * every return path, including the early-return failure cases.
 */
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j, bar_size, buf_size, iters, remain;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	bar_size = pci_resource_len(pdev, barno);

	/*
	 * Only the first dword of the test-register BAR is exercised —
	 * presumably to avoid scribbling over the live test registers the
	 * driver itself depends on. TODO confirm against endpoint function.
	 */
	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return false;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return false;

	/* Full-sized chunks first, then the tail remainder (if any). */
	iters = bar_size / buf_size;
	for (j = 0; j < iters; j++)
		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
						 write_buf, read_buf, buf_size))
			return false;

	remain = bar_size % buf_size;
	if (remain)
		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
						 write_buf, read_buf, remain))
			return false;

	return true;
}
 324
 325static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
 326{
 327	u32 val;
 328
 329	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
 330				 IRQ_TYPE_INTX);
 331	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
 332	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 333				 COMMAND_RAISE_INTX_IRQ);
 334	val = wait_for_completion_timeout(&test->irq_raised,
 335					  msecs_to_jiffies(1000));
 336	if (!val)
 337		return false;
 338
 339	return true;
 340}
 341
 342static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
 343				       u16 msi_num, bool msix)
 344{
 345	u32 val;
 346	struct pci_dev *pdev = test->pdev;
 347
 348	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
 349				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
 350	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
 351	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 352				 msix ? COMMAND_RAISE_MSIX_IRQ :
 353				 COMMAND_RAISE_MSI_IRQ);
 354	val = wait_for_completion_timeout(&test->irq_raised,
 355					  msecs_to_jiffies(1000));
 356	if (!val)
 357		return false;
 358
 359	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
 360}
 361
 362static int pci_endpoint_test_validate_xfer_params(struct device *dev,
 363		struct pci_endpoint_test_xfer_param *param, size_t alignment)
 364{
 365	if (!param->size) {
 366		dev_dbg(dev, "Data size is zero\n");
 367		return -EINVAL;
 368	}
 369
 370	if (param->size > SIZE_MAX - alignment) {
 371		dev_dbg(dev, "Maximum transfer data size exceeded\n");
 372		return -EINVAL;
 373	}
 374
 375	return 0;
 376}
 377
/*
 * PCITEST_COPY handler: allocate source and destination host buffers, DMA-map
 * both, program their bus addresses into the endpoint's test registers, and
 * command the endpoint to copy src -> dst. Success is judged by comparing
 * CRC32 of the source data (computed before the transfer) against CRC32 of
 * the destination after the transfer.
 *
 * @arg is a user pointer to struct pci_endpoint_test_xfer_param.
 * Returns true on a verified copy, false on any failure.
 *
 * NOTE(review): the cleanup label names are historical and slightly
 * misleading — err_dst_addr (reached when dst allocation fails) unwinds the
 * *source* mapping, and err_src_phys_addr frees the source buffer. The
 * unwind order itself is correct.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate by "alignment" so an aligned sub-buffer always fits. */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/* Align the *bus* address; shift the CPU pointer by the same offset. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	/* Checksum the source before the endpoint touches anything. */
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* Endpoint signals completion with an interrupt (no timeout here). */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the destination buffer. */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
 517
/*
 * PCITEST_WRITE handler: fill a host buffer with random data, program its bus
 * address and CRC32 into the endpoint, and command the endpoint to read it
 * (COMMAND_READ — the command is named from the endpoint's point of view:
 * the endpoint reads from host memory). The endpoint reports the outcome in
 * STATUS_READ_SUCCESS.
 *
 * @arg is a user pointer to struct pci_endpoint_test_xfer_param.
 * Returns true when the endpoint verified the data, false otherwise.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate by "alignment" so an aligned sub-buffer always fits. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the *bus* address; shift the CPU pointer by the same offset. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	/* Hand the expected checksum to the endpoint so it can verify. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	/* Endpoint signals completion with an interrupt (no timeout here). */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
 618
/*
 * PCITEST_READ handler: allocate a host buffer, program its bus address into
 * the endpoint, and command the endpoint to write into it (COMMAND_WRITE —
 * named from the endpoint's point of view: the endpoint writes to host
 * memory). The transfer is verified by comparing CRC32 of the received data
 * against the checksum the endpoint left in PCI_ENDPOINT_TEST_CHECKSUM.
 *
 * @arg is a user pointer to struct pci_endpoint_test_xfer_param.
 * Returns true when the checksums match, false otherwise.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate by "alignment" so an aligned sub-buffer always fits. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the *bus* address; shift the CPU pointer by the same offset. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	/* Endpoint signals completion with an interrupt (no timeout here). */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the received data. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}
 711
 712static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
 713{
 714	pci_endpoint_test_release_irq(test);
 715	pci_endpoint_test_free_irq_vectors(test);
 716	return true;
 717}
 718
 719static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
 720				      int req_irq_type)
 721{
 722	struct pci_dev *pdev = test->pdev;
 723	struct device *dev = &pdev->dev;
 724
 725	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
 726		dev_err(dev, "Invalid IRQ type option\n");
 727		return false;
 728	}
 729
 730	if (test->irq_type == req_irq_type)
 731		return true;
 732
 733	pci_endpoint_test_release_irq(test);
 734	pci_endpoint_test_free_irq_vectors(test);
 735
 736	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
 737		goto err;
 738
 739	if (!pci_endpoint_test_request_irq(test))
 740		goto err;
 741
 742	return true;
 743
 744err:
 745	pci_endpoint_test_free_irq_vectors(test);
 746	return false;
 747}
 748
 749static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
 750				    unsigned long arg)
 751{
 752	int ret = -EINVAL;
 753	enum pci_barno bar;
 754	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
 755	struct pci_dev *pdev = test->pdev;
 756
 757	mutex_lock(&test->mutex);
 758
 759	reinit_completion(&test->irq_raised);
 760	test->last_irq = -ENODATA;
 761
 762	switch (cmd) {
 763	case PCITEST_BAR:
 764		bar = arg;
 765		if (bar > BAR_5)
 766			goto ret;
 767		if (is_am654_pci_dev(pdev) && bar == BAR_0)
 768			goto ret;
 769		ret = pci_endpoint_test_bar(test, bar);
 770		break;
 771	case PCITEST_INTX_IRQ:
 772		ret = pci_endpoint_test_intx_irq(test);
 773		break;
 774	case PCITEST_MSI:
 775	case PCITEST_MSIX:
 776		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
 777		break;
 778	case PCITEST_WRITE:
 779		ret = pci_endpoint_test_write(test, arg);
 780		break;
 781	case PCITEST_READ:
 782		ret = pci_endpoint_test_read(test, arg);
 783		break;
 784	case PCITEST_COPY:
 785		ret = pci_endpoint_test_copy(test, arg);
 786		break;
 787	case PCITEST_SET_IRQTYPE:
 788		ret = pci_endpoint_test_set_irq(test, arg);
 789		break;
 790	case PCITEST_GET_IRQTYPE:
 791		ret = irq_type;
 792		break;
 793	case PCITEST_CLEAR_IRQ:
 794		ret = pci_endpoint_test_clear_irq(test);
 795		break;
 796	}
 797
 798ret:
 799	mutex_unlock(&test->mutex);
 800	return ret;
 801}
 802
/* Character-device ops: the whole test interface is driven via ioctl(). */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
 807
/*
 * Probe: enable the device, claim its regions, allocate IRQ vectors, map
 * every memory BAR, and register a /dev/pci-endpoint-test.N misc device.
 * Per-device defaults (test register BAR, DMA alignment, IRQ type) come
 * from ent->driver_data when the match-table entry provides them.
 *
 * NOTE(review): the module-wide "irq_type" global is overwritten here from
 * no_msi and driver_data — with multiple endpoint devices probing with
 * different data, later probes clobber the value earlier ones used. Worth
 * confirming whether this should be per-device state.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/* Defaults; overridden below when the ID-table entry carries data. */
	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/* 48-bit DMA mask; return value intentionally(?) ignored — TODO confirm. */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	/* Map every memory BAR; a failed map is fatal only for the test BAR. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	/* Second copy of the name: the misc core keeps its own reference. */
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}
 954
/*
 * Remove: tear down everything probe set up, in reverse order. The IDA id
 * is recovered by parsing it back out of the misc device name
 * ("pci-endpoint-test.%d"); if the name doesn't parse, teardown is skipped
 * entirely — NOTE(review): that leaks resources, though the name is always
 * generated by probe so this should be unreachable in practice.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
 982
/* Per-platform probe defaults, referenced from the device ID table below. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI AM654: test registers live in BAR2 and DMA needs 64K alignment. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI J721E family: test_reg_bar left at its zero value (BAR_0). */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};
1004
1005/*
1006 * If the controller's Vendor/Device ID are programmable, you may be able to
1007 * use one of the existing entries for testing instead of adding a new one.
1008 */
/*
 * Supported endpoint controllers. Entries with no .driver_data get NULL
 * data; probe then falls back to test_reg_bar = BAR_0, alignment = 0, and
 * leaves the module-level irq_type as-is.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
1051MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1052
/* PCI driver glue; SR-IOV VFs are enabled via the simple configure helper. */
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
1061
1062MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
1063MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1064MODULE_LICENSE("GPL v2");
v4.17
  1/**
 
  2 * Host side test driver to test endpoint functionality
  3 *
  4 * Copyright (C) 2017 Texas Instruments
  5 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  6 *
  7 * This program is free software: you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 of
  9 * the License as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 18 */
 19
 20#include <linux/crc32.h>
 
 21#include <linux/delay.h>
 22#include <linux/fs.h>
 23#include <linux/io.h>
 24#include <linux/interrupt.h>
 25#include <linux/irq.h>
 26#include <linux/miscdevice.h>
 27#include <linux/module.h>
 28#include <linux/mutex.h>
 29#include <linux/random.h>
 30#include <linux/slab.h>
 
 31#include <linux/pci.h>
 32#include <linux/pci_ids.h>
 33
 34#include <linux/pci_regs.h>
 35
 36#include <uapi/linux/pcitest.h>
 37
 38#define DRV_MODULE_NAME			"pci-endpoint-test"
 39
 40#define PCI_ENDPOINT_TEST_MAGIC		0x0
 41
 42#define PCI_ENDPOINT_TEST_COMMAND	0x4
 43#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
 44#define COMMAND_RAISE_MSI_IRQ		BIT(1)
 45#define MSI_NUMBER_SHIFT		2
 46/* 6 bits for MSI number */
 47#define COMMAND_READ                    BIT(8)
 48#define COMMAND_WRITE                   BIT(9)
 49#define COMMAND_COPY                    BIT(10)
 50
 51#define PCI_ENDPOINT_TEST_STATUS	0x8
 52#define STATUS_READ_SUCCESS             BIT(0)
 53#define STATUS_READ_FAIL                BIT(1)
 54#define STATUS_WRITE_SUCCESS            BIT(2)
 55#define STATUS_WRITE_FAIL               BIT(3)
 56#define STATUS_COPY_SUCCESS             BIT(4)
 57#define STATUS_COPY_FAIL                BIT(5)
 58#define STATUS_IRQ_RAISED               BIT(6)
 59#define STATUS_SRC_ADDR_INVALID         BIT(7)
 60#define STATUS_DST_ADDR_INVALID         BIT(8)
 
 
 
 
 
 
 61
 62#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0xc
 63#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
 64
 65#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
 66#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
 67
 68#define PCI_ENDPOINT_TEST_SIZE		0x1c
 69#define PCI_ENDPOINT_TEST_CHECKSUM	0x20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 70
/* Allocates the ".N" suffix for each /dev/pci-endpoint-test.N node. */
static DEFINE_IDA(pci_endpoint_test_ida);

/* Recover the driver state from the embedded miscdevice pointer. */
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

/* Module parameter: skip MSI vector allocation and use INTx only. */
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
 79
 
 
 
 
/* Standard PCI BAR indices 0..5. */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};
 88
/* Per-device state for one bound endpoint test function. */
struct pci_endpoint_test {
	struct pci_dev	*pdev;		/* the bound PCI device */
	void __iomem	*base;		/* mapping of the test register BAR */
	void __iomem	*bar[6];	/* per-BAR mappings, NULL if not mapped */
	struct completion irq_raised;	/* completed by the IRQ handler */
	int		last_irq;	/* IRQ number of the last interrupt seen */
	int		num_irqs;	/* MSI vectors allocated at probe time */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;	/* /dev/pci-endpoint-test.N node */
	enum pci_barno test_reg_bar;	/* BAR that holds the test registers */
	size_t alignment;		/* required DMA buffer alignment, 0 = none */
};
102
/* Static per-device-ID configuration, referenced from the PCI ID table. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;	/* which BAR exposes the test registers */
	size_t alignment;		/* DMA alignment this device requires */
	bool no_msi;			/* force INTx for this device */
};
108
109static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
110					  u32 offset)
111{
112	return readl(test->base + offset);
113}
114
115static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
116					    u32 offset, u32 value)
117{
118	writel(value, test->base + offset);
119}
120
121static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
122					      int bar, int offset)
123{
124	return readl(test->bar[bar] + offset);
125}
126
127static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
128						int bar, u32 offset, u32 value)
129{
130	writel(value, test->bar[bar] + offset);
131}
132
133static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
134{
135	struct pci_endpoint_test *test = dev_id;
136	u32 reg;
137
138	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
139	if (reg & STATUS_IRQ_RAISED) {
140		test->last_irq = irq;
141		complete(&test->irq_raised);
142		reg &= ~STATUS_IRQ_RAISED;
143	}
144	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
145				 reg);
146
147	return IRQ_HANDLED;
148}
149
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
150static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
151				  enum pci_barno barno)
152{
153	int j;
154	u32 val;
155	int size;
156	struct pci_dev *pdev = test->pdev;
157
158	if (!test->bar[barno])
159		return false;
160
161	size = pci_resource_len(pdev, barno);
162
163	if (barno == test->test_reg_bar)
164		size = 0x4;
165
166	for (j = 0; j < size; j += 4)
167		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
 
 
 
168
169	for (j = 0; j < size; j += 4) {
170		val = pci_endpoint_test_bar_readl(test, barno, j);
171		if (val != 0xA0A0A0A0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172			return false;
173	}
174
175	return true;
176}
177
178static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
179{
180	u32 val;
181
 
 
 
182	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
183				 COMMAND_RAISE_LEGACY_IRQ);
184	val = wait_for_completion_timeout(&test->irq_raised,
185					  msecs_to_jiffies(1000));
186	if (!val)
187		return false;
188
189	return true;
190}
191
/*
 * Ask the endpoint to raise MSI vector @msi_num (1-based) and verify that
 * the interrupt which actually fired corresponds to that vector.
 */
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u8 msi_num)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	/* Encode the requested vector into the command register. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msi_num << MSI_NUMBER_SHIFT |
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	/*
	 * NOTE(review): this assumes MSI vector i maps to Linux IRQ
	 * pdev->irq + (i - 1), i.e. consecutively numbered vectors --
	 * TODO confirm this holds on all platforms (pci_irq_vector()
	 * is the portable way to look this up).
	 */
	if (test->last_irq - pdev->irq == msi_num - 1)
		return true;

	return false;
}
211
/*
 * COPY test: allocate two DMA-coherent buffers, fill the source with random
 * data, ask the endpoint to DMA-copy source -> destination, then compare
 * CRC32s of both buffers.  Returns true on a byte-exact copy.
 *
 * Buffers are over-allocated by test->alignment so that a sub-buffer with
 * the required bus-address alignment always fits inside the allocation.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 src_crc32;
	u32 dst_crc32;

	/* Reject sizes for which "size + alignment" below would overflow. */
	if (size > SIZE_MAX - alignment)
		goto err;

	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_src_phys_addr, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	/*
	 * Round the bus address up to the required alignment and move the
	 * CPU pointer by the same offset so both views stay in sync.
	 */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	/* Program the 64-bit source bus address into the endpoint. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	/* Randomize the source and remember its checksum for the compare. */
	get_random_bytes(src_addr, size);
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_dst_phys_addr, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "failed to allocate destination address\n");
		ret = false;
		goto err_orig_src_addr;
	}

	/* Same alignment fixup for the destination buffer. */
	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	/* Kick off the copy; completion is signalled on MSI vector 1. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);

	/* NOTE(review): no timeout -- an unresponsive endpoint blocks here. */
	wait_for_completion(&test->irq_raised);

	/* The copy succeeded iff destination CRC matches the source CRC. */
	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_dst_addr,
			  orig_dst_phys_addr);

err_orig_src_addr:
	dma_free_coherent(dev, size + alignment, orig_src_addr,
			  orig_src_phys_addr);

err:
	return ret;
}
303
/*
 * WRITE test (host -> endpoint direction): the host fills a DMA buffer with
 * random data and publishes its CRC32; the endpoint then *reads* the buffer
 * (hence COMMAND_READ) and checks the data on its side, reporting the result
 * through STATUS_READ_SUCCESS.  Returns true if the endpoint saw valid data.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	/* Reject sizes for which "size + alignment" below would overflow. */
	if (size > SIZE_MAX - alignment)
		goto err;

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "failed to allocate address\n");
		ret = false;
		goto err;
	}

	/* Align the bus address and shift the CPU pointer identically. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	get_random_bytes(addr, size);

	/* Publish the expected checksum for the endpoint to verify against. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	/* Endpoint reads from the host buffer; completes on MSI vector 1. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_READ);

	/* NOTE(review): no timeout -- an unresponsive endpoint blocks here. */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
	return ret;
}
365
/*
 * READ test (endpoint -> host direction): the endpoint *writes* data into a
 * host DMA buffer (hence COMMAND_WRITE) and publishes its CRC32 in the
 * CHECKSUM register; the host then verifies the received bytes against that
 * checksum.  Returns true if the data arrived intact.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	/* Reject sizes for which "size + alignment" below would overflow. */
	if (size > SIZE_MAX - alignment)
		goto err;

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	/* Align the bus address and shift the CPU pointer identically. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	/* Endpoint writes into the host buffer; completes on MSI vector 1. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);

	/* NOTE(review): no timeout -- an unresponsive endpoint blocks here. */
	wait_for_completion(&test->irq_raised);

	/* Compare our CRC of the received data with the endpoint's CRC. */
	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
err:
	return ret;
}
419
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
420static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
421				    unsigned long arg)
422{
423	int ret = -EINVAL;
424	enum pci_barno bar;
425	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
 
426
427	mutex_lock(&test->mutex);
 
 
 
 
428	switch (cmd) {
429	case PCITEST_BAR:
430		bar = arg;
431		if (bar < 0 || bar > 5)
 
 
432			goto ret;
433		ret = pci_endpoint_test_bar(test, bar);
434		break;
435	case PCITEST_LEGACY_IRQ:
436		ret = pci_endpoint_test_legacy_irq(test);
437		break;
438	case PCITEST_MSI:
439		ret = pci_endpoint_test_msi_irq(test, arg);
 
440		break;
441	case PCITEST_WRITE:
442		ret = pci_endpoint_test_write(test, arg);
443		break;
444	case PCITEST_READ:
445		ret = pci_endpoint_test_read(test, arg);
446		break;
447	case PCITEST_COPY:
448		ret = pci_endpoint_test_copy(test, arg);
449		break;
 
 
 
 
 
 
 
 
 
450	}
451
452ret:
453	mutex_unlock(&test->mutex);
454	return ret;
455}
456
/* Character-device ops: all test operations are driven through ioctl(). */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
461
462static int pci_endpoint_test_probe(struct pci_dev *pdev,
463				   const struct pci_device_id *ent)
464{
465	int i;
466	int err;
467	int irq = 0;
468	int id;
469	char name[20];
470	enum pci_barno bar;
471	void __iomem *base;
472	struct device *dev = &pdev->dev;
473	struct pci_endpoint_test *test;
474	struct pci_endpoint_test_data *data;
475	enum pci_barno test_reg_bar = BAR_0;
476	struct miscdevice *misc_device;
477
478	if (pci_is_bridge(pdev))
479		return -ENODEV;
480
481	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
482	if (!test)
483		return -ENOMEM;
484
485	test->test_reg_bar = 0;
486	test->alignment = 0;
487	test->pdev = pdev;
 
 
 
 
488
489	data = (struct pci_endpoint_test_data *)ent->driver_data;
490	if (data) {
491		test_reg_bar = data->test_reg_bar;
 
492		test->alignment = data->alignment;
493		no_msi = data->no_msi;
494	}
495
496	init_completion(&test->irq_raised);
497	mutex_init(&test->mutex);
498
 
 
499	err = pci_enable_device(pdev);
500	if (err) {
501		dev_err(dev, "Cannot enable PCI device\n");
502		return err;
503	}
504
505	err = pci_request_regions(pdev, DRV_MODULE_NAME);
506	if (err) {
507		dev_err(dev, "Cannot obtain PCI resources\n");
508		goto err_disable_pdev;
509	}
510
511	pci_set_master(pdev);
512
513	if (!no_msi) {
514		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
515		if (irq < 0)
516			dev_err(dev, "failed to get MSI interrupts\n");
517		test->num_irqs = irq;
518	}
519
520	err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
521			       IRQF_SHARED, DRV_MODULE_NAME, test);
522	if (err) {
523		dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
524		goto err_disable_msi;
525	}
526
527	for (i = 1; i < irq; i++) {
528		err = devm_request_irq(dev, pdev->irq + i,
529				       pci_endpoint_test_irqhandler,
530				       IRQF_SHARED, DRV_MODULE_NAME, test);
531		if (err)
532			dev_err(dev, "failed to request IRQ %d for MSI %d\n",
533				pdev->irq + i, i + 1);
534	}
535
536	for (bar = BAR_0; bar <= BAR_5; bar++) {
537		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
538			base = pci_ioremap_bar(pdev, bar);
539			if (!base) {
540				dev_err(dev, "failed to read BAR%d\n", bar);
541				WARN_ON(bar == test_reg_bar);
542			}
543			test->bar[bar] = base;
544		}
545	}
546
547	test->base = test->bar[test_reg_bar];
548	if (!test->base) {
549		err = -ENOMEM;
550		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
551			test_reg_bar);
552		goto err_iounmap;
553	}
554
555	pci_set_drvdata(pdev, test);
556
557	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
558	if (id < 0) {
559		err = id;
560		dev_err(dev, "unable to get id\n");
561		goto err_iounmap;
562	}
563
564	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
 
 
 
 
 
 
 
 
 
 
 
565	misc_device = &test->miscdev;
566	misc_device->minor = MISC_DYNAMIC_MINOR;
567	misc_device->name = kstrdup(name, GFP_KERNEL);
568	if (!misc_device->name) {
569		err = -ENOMEM;
570		goto err_ida_remove;
571	}
572	misc_device->fops = &pci_endpoint_test_fops,
 
573
574	err = misc_register(misc_device);
575	if (err) {
576		dev_err(dev, "failed to register device\n");
577		goto err_kfree_name;
578	}
579
580	return 0;
581
582err_kfree_name:
583	kfree(misc_device->name);
584
 
 
 
 
 
 
585err_ida_remove:
586	ida_simple_remove(&pci_endpoint_test_ida, id);
587
588err_iounmap:
589	for (bar = BAR_0; bar <= BAR_5; bar++) {
590		if (test->bar[bar])
591			pci_iounmap(pdev, test->bar[bar]);
592	}
593
594	for (i = 0; i < irq; i++)
595		devm_free_irq(dev, pdev->irq + i, test);
596
597err_disable_msi:
598	pci_disable_msi(pdev);
599	pci_release_regions(pdev);
600
601err_disable_pdev:
602	pci_disable_device(pdev);
603
604	return err;
605}
606
/*
 * Remove: tear down everything probe set up -- misc device, IDA id, BAR
 * mappings, IRQs, MSI, regions, and finally the PCI device itself.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	int i;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	/* Recover the IDA id from "pci-endpoint-test.N"; bail on mismatch. */
	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	/* Unmap every BAR that probe managed to ioremap. */
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}
	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(&pdev->dev, pdev->irq + i, test);
	pci_disable_msi(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
633
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Devices this driver binds to: TI DRA7xx endpoint test functions. */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
640
/* Host-side PCI driver glue; probe/remove handle one test endpoint each. */
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");