Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Test driver to test endpoint functionality
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/crc32.h>
  10#include <linux/delay.h>
  11#include <linux/dmaengine.h>
  12#include <linux/io.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/pci_ids.h>
  16#include <linux/random.h>
  17
  18#include <linux/pci-epc.h>
  19#include <linux/pci-epf.h>
  20#include <linux/pci_regs.h>
  21
  22#define IRQ_TYPE_LEGACY			0
  23#define IRQ_TYPE_MSI			1
  24#define IRQ_TYPE_MSIX			2
  25
  26#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
  27#define COMMAND_RAISE_MSI_IRQ		BIT(1)
  28#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
  29#define COMMAND_READ			BIT(3)
  30#define COMMAND_WRITE			BIT(4)
  31#define COMMAND_COPY			BIT(5)
 
  32
  33#define STATUS_READ_SUCCESS		BIT(0)
  34#define STATUS_READ_FAIL		BIT(1)
  35#define STATUS_WRITE_SUCCESS		BIT(2)
  36#define STATUS_WRITE_FAIL		BIT(3)
  37#define STATUS_COPY_SUCCESS		BIT(4)
  38#define STATUS_COPY_FAIL		BIT(5)
  39#define STATUS_IRQ_RAISED		BIT(6)
  40#define STATUS_SRC_ADDR_INVALID		BIT(7)
  41#define STATUS_DST_ADDR_INVALID		BIT(8)
  42
  43#define FLAG_USE_DMA			BIT(0)
  44
  45#define TIMER_RESOLUTION		1
  46
  47static struct workqueue_struct *kpcitest_workqueue;
  48
/* Per-function driver state, allocated in pci_epf_test_probe(). */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* local backing memory for each BAR */
	struct pci_epf		*epf;			/* owning endpoint function */
	enum pci_barno		test_reg_bar;		/* BAR that holds struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* MSI-X table offset within test_reg_bar */
	struct delayed_work	cmd_handler;		/* polls reg->command written by the host */
	struct dma_chan		*dma_chan_tx;		/* MEM_TO_DEV channel (or shared memcpy chan) */
	struct dma_chan		*dma_chan_rx;		/* DEV_TO_MEM channel (or shared memcpy chan) */
	struct completion	transfer_complete;	/* signalled by the DMA completion callback */
	bool			dma_supported;		/* a DMA channel was acquired at bind time */
	bool			dma_private;		/* dedicated slave channels, not a memcpy chan */
	const struct pci_epc_features *epc_features;	/* controller capabilities cached at bind */
};
  62
/*
 * Register block exposed to the host through the test BAR; __packed so the
 * layout matches what the host-side test driver expects byte-for-byte.
 */
struct pci_epf_test_reg {
	u32	magic;		/* not read by this driver; presumably checked by the host — TODO confirm */
	u32	command;	/* COMMAND_* bits; cleared by the handler once consumed */
	u32	status;		/* STATUS_* bits reported back to the host */
	u64	src_addr;	/* host physical source address for READ/COPY */
	u64	dst_addr;	/* host physical destination address for WRITE/COPY */
	u32	size;		/* transfer length in bytes */
	u32	checksum;	/* crc32_le of the payload (written on WRITE, compared on READ) */
	u32	irq_type;	/* IRQ_TYPE_LEGACY / IRQ_TYPE_MSI / IRQ_TYPE_MSIX */
	u32	irq_number;	/* interrupt vector to raise */
	u32	flags;		/* FLAG_USE_DMA selects dmaengine over mmio copies */
} __packed;
  75
/* Default config-space header; vendor/device IDs are left as wildcards. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default per-BAR sizes; overwritten by pci_epf_configure_bar() when the
 * controller reports a fixed BAR size. */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
  84
/* dmaengine completion callback: wake the waiter in
 * pci_epf_test_data_transfer(). */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}
  91
  92/**
  93 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
  94 *				  data between PCIe EP and remote PCIe RC
  95 * @epf_test: the EPF test device that performs the data transfer operation
  96 * @dma_dst: The destination address of the data transfer. It can be a physical
  97 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
  98 * @dma_src: The source address of the data transfer. It can be a physical
  99 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 100 * @len: The size of the data transfer
 101 * @dma_remote: remote RC physical address
 102 * @dir: DMA transfer direction
 103 *
 104 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 105 * PCIe RC. The source and destination address can be a physical address given
 106 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 107 *
 108 * The function returns '0' on success and negative value on failure.
 109 */
 110static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
 111				      dma_addr_t dma_dst, dma_addr_t dma_src,
 112				      size_t len, dma_addr_t dma_remote,
 113				      enum dma_transfer_direction dir)
 114{
 115	struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
 116				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
 117	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
 118	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 119	struct pci_epf *epf = epf_test->epf;
 120	struct dma_async_tx_descriptor *tx;
 121	struct dma_slave_config sconf = {};
 122	struct device *dev = &epf->dev;
 123	dma_cookie_t cookie;
 124	int ret;
 125
 126	if (IS_ERR_OR_NULL(chan)) {
 127		dev_err(dev, "Invalid DMA memcpy channel\n");
 128		return -EINVAL;
 129	}
 130
 131	if (epf_test->dma_private) {
 132		sconf.direction = dir;
 133		if (dir == DMA_MEM_TO_DEV)
 134			sconf.dst_addr = dma_remote;
 135		else
 136			sconf.src_addr = dma_remote;
 137
 138		if (dmaengine_slave_config(chan, &sconf)) {
 139			dev_err(dev, "DMA slave config fail\n");
 140			return -EIO;
 141		}
 142		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
 143						 flags);
 144	} else {
 145		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
 146					       flags);
 147	}
 148
 149	if (!tx) {
 150		dev_err(dev, "Failed to prepare DMA memcpy\n");
 151		return -EIO;
 152	}
 153
 154	tx->callback = pci_epf_test_dma_callback;
 155	tx->callback_param = epf_test;
 156	cookie = tx->tx_submit(tx);
 157	reinit_completion(&epf_test->transfer_complete);
 158
 159	ret = dma_submit_error(cookie);
 160	if (ret) {
 161		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
 162		return -EIO;
 163	}
 164
 165	dma_async_issue_pending(chan);
 166	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
 167	if (ret < 0) {
 168		dmaengine_terminate_sync(chan);
 169		dev_err(dev, "DMA wait_for_completion_timeout\n");
 170		return -ETIMEDOUT;
 171	}
 172
 173	return 0;
 174}
 175
/* Match criteria handed to dma_request_channel() via epf_dma_filter_fn(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device expected to own the channel */
	u32 dma_mask;		/* required BIT(dma_transfer_direction) capability */
};
 180
 181static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
 182{
 183	struct epf_dma_filter *filter = node;
 184	struct dma_slave_caps caps;
 185
 186	memset(&caps, 0, sizeof(caps));
 187	dma_get_slave_caps(chan, &caps);
 188
 189	return chan->device->dev == filter->dev
 190		&& (filter->dma_mask & caps.directions);
 191}
 192
 193/**
 194 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 195 * @epf_test: the EPF test device that performs data transfer operation
 196 *
 197 * Function to initialize EPF test DMA channel.
 198 */
 199static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
 200{
 201	struct pci_epf *epf = epf_test->epf;
 202	struct device *dev = &epf->dev;
 203	struct epf_dma_filter filter;
 204	struct dma_chan *dma_chan;
 205	dma_cap_mask_t mask;
 206	int ret;
 207
 208	filter.dev = epf->epc->dev.parent;
 209	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
 210
 211	dma_cap_zero(mask);
 212	dma_cap_set(DMA_SLAVE, mask);
 213	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
 214	if (!dma_chan) {
 215		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
 216		goto fail_back_tx;
 217	}
 218
 219	epf_test->dma_chan_rx = dma_chan;
 220
 221	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
 222	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
 223
 224	if (!dma_chan) {
 225		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
 226		goto fail_back_rx;
 227	}
 228
 229	epf_test->dma_chan_tx = dma_chan;
 230	epf_test->dma_private = true;
 231
 232	init_completion(&epf_test->transfer_complete);
 233
 234	return 0;
 235
 236fail_back_rx:
 237	dma_release_channel(epf_test->dma_chan_rx);
 238	epf_test->dma_chan_tx = NULL;
 239
 240fail_back_tx:
 241	dma_cap_zero(mask);
 242	dma_cap_set(DMA_MEMCPY, mask);
 243
 244	dma_chan = dma_request_chan_by_mask(&mask);
 245	if (IS_ERR(dma_chan)) {
 246		ret = PTR_ERR(dma_chan);
 247		if (ret != -EPROBE_DEFER)
 248			dev_err(dev, "Failed to get DMA channel\n");
 249		return ret;
 250	}
 251	init_completion(&epf_test->transfer_complete);
 252
 253	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
 254
 255	return 0;
 256}
 257
 258/**
 259 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 260 * @epf_test: the EPF test device that performs data transfer operation
 261 *
 262 * Helper to cleanup EPF test DMA channel.
 263 */
 264static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
 265{
 266	if (!epf_test->dma_supported)
 267		return;
 268
 269	dma_release_channel(epf_test->dma_chan_tx);
 270	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
 271		epf_test->dma_chan_tx = NULL;
 272		epf_test->dma_chan_rx = NULL;
 273		return;
 274	}
 275
 276	dma_release_channel(epf_test->dma_chan_rx);
 277	epf_test->dma_chan_rx = NULL;
 278
 279	return;
 280}
 281
 282static void pci_epf_test_print_rate(const char *ops, u64 size,
 283				    struct timespec64 *start,
 284				    struct timespec64 *end, bool dma)
 285{
 286	struct timespec64 ts;
 287	u64 rate, ns;
 288
 289	ts = timespec64_sub(*end, *start);
 290
 291	/* convert both size (stored in 'rate') and time in terms of 'ns' */
 292	ns = timespec64_to_ns(&ts);
 293	rate = size * NSEC_PER_SEC;
 294
 295	/* Divide both size (stored in 'rate') and ns by a common factor */
 296	while (ns > UINT_MAX) {
 297		rate >>= 1;
 298		ns >>= 1;
 299	}
 300
 301	if (!ns)
 302		return;
 303
 304	/* calculate the rate */
 305	do_div(rate, (uint32_t)ns);
 306
 307	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
 308		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
 309		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
 310}
 311
/*
 * COMMAND_COPY handler: copy reg->size bytes from the host buffer at
 * reg->src_addr to the host buffer at reg->dst_addr.  Both host buffers are
 * mapped through the EPC outbound window for the duration of the copy.
 * Address setup failures are latched into reg->status; the caller reports
 * overall success/failure.  Returns 0 on success, negative errno otherwise.
 */
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Carve an outbound window and map it onto the host source buffer */
	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	/* Same again for the host destination buffer */
	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	/* Time the transfer so the throughput can be reported below */
	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		/* Private slave channels cannot do mem-to-mem copies */
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* PIO path: bounce through a kernel buffer with mmio copies */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

	/* Unwind in strict reverse order of acquisition */
err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}
 410
/*
 * COMMAND_READ handler: read reg->size bytes from the host buffer at
 * reg->src_addr into a local buffer and verify its crc32 against
 * reg->checksum (the expected value supplied through the register BAR).
 * Uses the DMA engine when FLAG_USE_DMA is set, otherwise memcpy_fromio().
 * Returns 0 on success (checksum matched), negative errno otherwise.
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Map an outbound window onto the host source buffer */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* DMA writes into buf, so map it for the device (FROM_DEVICE) */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	/* Verify the received payload against the expected checksum */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
 500
/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish its
 * crc32 in reg->checksum for the host to verify, then write reg->size bytes
 * to the host buffer at reg->dst_addr.  Uses the DMA engine when
 * FLAG_USE_DMA is set, otherwise memcpy_toio().  Returns 0 on success.
 */
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Map an outbound window onto the host destination buffer */
	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Random payload; checksum published so the host can verify it */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* DMA reads from buf, so map it for the device (TO_DEVICE) */
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}
 596
 597static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
 598				   u16 irq)
 599{
 
 600	struct pci_epf *epf = epf_test->epf;
 601	struct device *dev = &epf->dev;
 602	struct pci_epc *epc = epf->epc;
 603	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 604	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
 605
 606	reg->status |= STATUS_IRQ_RAISED;
 607
 608	switch (irq_type) {
 609	case IRQ_TYPE_LEGACY:
 610		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 611				  PCI_EPC_IRQ_LEGACY, 0);
 612		break;
 613	case IRQ_TYPE_MSI:
 614		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 615				  PCI_EPC_IRQ_MSI, irq);
 616		break;
 617	case IRQ_TYPE_MSIX:
 618		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 619				  PCI_EPC_IRQ_MSIX, irq);
 620		break;
 621	default:
 622		dev_err(dev, "Failed to raise IRQ, unknown type\n");
 623		break;
 624	}
 625}
 626
/*
 * Delayed-work handler: poll reg->command (written by the host), run the
 * requested operation, update reg->status, raise the requested interrupt,
 * and re-arm itself 1ms later.  Exactly one command bit is serviced per
 * invocation.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before processing */
	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		/* Refuse vectors beyond what is actually configured */
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSI, reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		/* Same range check for MSI-X */
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSIX, reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	/* Keep polling: re-queue ourselves with a 1ms delay */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
 716
 
 
 
 
 
 
 
 
 717static void pci_epf_test_unbind(struct pci_epf *epf)
 718{
 719	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 720	struct pci_epc *epc = epf->epc;
 721	struct pci_epf_bar *epf_bar;
 722	int bar;
 723
 724	cancel_delayed_work(&epf_test->cmd_handler);
 725	pci_epf_test_clean_dma_chan(epf_test);
 726	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 727		epf_bar = &epf->bar[bar];
 728
 729		if (epf_test->reg[bar]) {
 730			pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
 731					  epf_bar);
 732			pci_epf_free_space(epf, epf_test->reg[bar], bar,
 733					   PRIMARY_INTERFACE);
 734		}
 735	}
 736}
 737
/*
 * Program each allocated BAR into the endpoint controller, skipping BARs
 * the controller reserves.  On failure the BAR's backing space is freed;
 * failure is only fatal for the test register BAR, since without it the
 * host cannot reach the command registers.
 */
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 */
		/* A 64-bit BAR consumes the next BAR slot as well */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}
 776
/*
 * Controller-side initialization: write the config-space header, program
 * the BARs and configure MSI/MSI-X according to the controller's
 * capabilities.  Called from bind, or deferred to the CORE_INIT notifier
 * when the controller requests it.
 */
static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	/* NOTE(review): header only written for vfunc_no <= 1 — presumably
	 * later virtual functions share it; confirm against the EPC core. */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}
 828
 829static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
 830				 void *data)
 831{
 832	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
 833	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 834	int ret;
 835
 836	switch (val) {
 837	case CORE_INIT:
 838		ret = pci_epf_test_core_init(epf);
 839		if (ret)
 840			return NOTIFY_BAD;
 841		break;
 842
 843	case LINK_UP:
 844		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
 845				   msecs_to_jiffies(1));
 846		break;
 847
 848	default:
 849		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
 850		return NOTIFY_BAD;
 851	}
 852
 853	return NOTIFY_OK;
 854}
 855
/*
 * Allocate local backing memory for every usable BAR.  The test register
 * BAR additionally carries the MSI-X table and PBA after the register
 * block when the controller is MSI-X capable.  Only failure of the test
 * register BAR is fatal; other BARs are merely logged.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		/* MSI-X table lives right after the register block */
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		/* Fixed-size BAR: everything must fit, and we use it whole */
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/* A 64-bit BAR consumes the next BAR slot as well */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align,
					   PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}
 919
 920static void pci_epf_configure_bar(struct pci_epf *epf,
 921				  const struct pci_epc_features *epc_features)
 922{
 923	struct pci_epf_bar *epf_bar;
 924	bool bar_fixed_64bit;
 925	int i;
 926
 927	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 928		epf_bar = &epf->bar[i];
 929		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
 930		if (bar_fixed_64bit)
 931			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
 932		if (epc_features->bar_fixed_size[i])
 933			bar_size[i] = epc_features->bar_fixed_size[i];
 934	}
 935}
 936
/*
 * Bind callback: query controller features, pick the test register BAR,
 * allocate BAR memory, (optionally) run core init now, acquire DMA
 * channels, and either register for EPC notifications or start the
 * command poller immediately.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	linkup_notifier = epc_features->linkup_notifier;
	core_init_notifier = epc_features->core_init_notifier;
	/* The first free BAR hosts the test register block */
	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;
	pci_epf_configure_bar(epf, epc_features);

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	/* Without a CORE_INIT notifier the controller is ready now */
	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	/* DMA is optional: fall back to mmio copies if no channel exists */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (linkup_notifier || core_init_notifier) {
		epf->nb.notifier_call = pci_epf_test_notifier;
		pci_epc_register_notifier(epc, &epf->nb);
	} else {
		/* No link-up event available: start polling right away */
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
	}

	return 0;
}
 991
/* Matched against the name given when the endpoint function is created. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
 998
 999static int pci_epf_test_probe(struct pci_epf *epf)
1000{
1001	struct pci_epf_test *epf_test;
1002	struct device *dev = &epf->dev;
 
 
 
 
 
 
 
 
 
 
 
1003
1004	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1005	if (!epf_test)
1006		return -ENOMEM;
1007
1008	epf->header = &test_header;
1009	epf_test->epf = epf;
 
 
1010
1011	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1012
1013	epf_set_drvdata(epf, epf_test);
1014	return 0;
1015}
1016
/* Callbacks invoked by the EPF core when the function is (un)bound to a
 * controller. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
1029
1030static int __init pci_epf_test_init(void)
1031{
1032	int ret;
1033
1034	kpcitest_workqueue = alloc_workqueue("kpcitest",
1035					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1036	if (!kpcitest_workqueue) {
1037		pr_err("Failed to allocate the kpcitest work queue\n");
1038		return -ENOMEM;
1039	}
1040
1041	ret = pci_epf_register_driver(&test_driver);
1042	if (ret) {
1043		destroy_workqueue(kpcitest_workqueue);
1044		pr_err("Failed to register pci epf test driver --> %d\n", ret);
1045		return ret;
1046	}
1047
1048	return 0;
1049}
1050module_init(pci_epf_test_init);
1051
/* Module exit: destroy the poller workqueue and unregister the driver. */
static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
1059
1060MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1061MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1062MODULE_LICENSE("GPL v2");
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/**
  3 * Test driver to test endpoint functionality
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/crc32.h>
 10#include <linux/delay.h>
 
 11#include <linux/io.h>
 12#include <linux/module.h>
 13#include <linux/slab.h>
 14#include <linux/pci_ids.h>
 15#include <linux/random.h>
 16
 17#include <linux/pci-epc.h>
 18#include <linux/pci-epf.h>
 19#include <linux/pci_regs.h>
 20
 
 
 
 
/* Host -> endpoint commands, written into pci_epf_test_reg::command. */
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
/* Bits 2-7 of the command word carry the MSI vector number to raise. */
#define MSI_NUMBER_SHIFT		2
#define MSI_NUMBER_MASK			(0x3f << MSI_NUMBER_SHIFT)
#define COMMAND_READ			BIT(8)
#define COMMAND_WRITE			BIT(9)
#define COMMAND_COPY			BIT(10)

/* Endpoint -> host status, written into pci_epf_test_reg::status. */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define TIMER_RESOLUTION		1
 40
 41static struct workqueue_struct *kpcitest_workqueue;
 42
/* Per-function driver state, stored as the epf's drvdata. */
struct pci_epf_test {
	void			*reg[6];	/* BAR backing memory, indexed by BAR number */
	struct pci_epf		*epf;		/* owning endpoint function */
	enum pci_barno		test_reg_bar;	/* BAR that holds struct pci_epf_test_reg */
	bool			linkup_notifier; /* wait for link-up before polling commands */
	struct delayed_work	cmd_handler;	/* periodic command-polling work item */
};
 50
/* Register layout exposed to the host through the test register BAR. */
struct pci_epf_test_reg {
	u32	magic;
	u32	command;	/* COMMAND_* bits plus the MSI number field */
	u32	status;		/* STATUS_* bits, written back by the endpoint */
	u64	src_addr;	/* host-side source bus address */
	u64	dst_addr;	/* host-side destination bus address */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* crc32_le over the transferred data */
} __packed;
 60
/* Default configuration-space header, written to the EPC at bind time. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
 67
/* Optional per-device overrides carried in pci_epf_device_id::driver_data. */
struct pci_epf_test_data {
	enum pci_barno	test_reg_bar;	/* which BAR hosts the test registers */
	bool		linkup_notifier; /* controller delivers link-up notifications */
};
 72
/* Default allocation size, in bytes, for BAR0..BAR5 data buffers. */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 74
 75static int pci_epf_test_copy(struct pci_epf_test *epf_test)
 76{
 77	int ret;
 
 78	void __iomem *src_addr;
 79	void __iomem *dst_addr;
 80	phys_addr_t src_phys_addr;
 81	phys_addr_t dst_phys_addr;
 
 82	struct pci_epf *epf = epf_test->epf;
 83	struct device *dev = &epf->dev;
 84	struct pci_epc *epc = epf->epc;
 85	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 86	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
 87
 88	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
 89	if (!src_addr) {
 90		dev_err(dev, "failed to allocate source address\n");
 91		reg->status = STATUS_SRC_ADDR_INVALID;
 92		ret = -ENOMEM;
 93		goto err;
 94	}
 95
 96	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
 97			       reg->size);
 98	if (ret) {
 99		dev_err(dev, "failed to map source address\n");
100		reg->status = STATUS_SRC_ADDR_INVALID;
101		goto err_src_addr;
102	}
103
104	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
105	if (!dst_addr) {
106		dev_err(dev, "failed to allocate destination address\n");
107		reg->status = STATUS_DST_ADDR_INVALID;
108		ret = -ENOMEM;
109		goto err_src_map_addr;
110	}
111
112	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
113			       reg->size);
114	if (ret) {
115		dev_err(dev, "failed to map destination address\n");
116		reg->status = STATUS_DST_ADDR_INVALID;
117		goto err_dst_addr;
118	}
119
120	memcpy(dst_addr, src_addr, reg->size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
122	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
 
123
124err_dst_addr:
125	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
126
127err_src_map_addr:
128	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
129
130err_src_addr:
131	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
132
133err:
134	return ret;
135}
136
137static int pci_epf_test_read(struct pci_epf_test *epf_test)
138{
139	int ret;
140	void __iomem *src_addr;
141	void *buf;
142	u32 crc32;
 
143	phys_addr_t phys_addr;
 
 
144	struct pci_epf *epf = epf_test->epf;
145	struct device *dev = &epf->dev;
146	struct pci_epc *epc = epf->epc;
 
147	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
148	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
149
150	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
151	if (!src_addr) {
152		dev_err(dev, "failed to allocate address\n");
153		reg->status = STATUS_SRC_ADDR_INVALID;
154		ret = -ENOMEM;
155		goto err;
156	}
157
158	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
159			       reg->size);
160	if (ret) {
161		dev_err(dev, "failed to map address\n");
162		reg->status = STATUS_SRC_ADDR_INVALID;
163		goto err_addr;
164	}
165
166	buf = kzalloc(reg->size, GFP_KERNEL);
167	if (!buf) {
168		ret = -ENOMEM;
169		goto err_map_addr;
170	}
171
172	memcpy(buf, src_addr, reg->size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
174	crc32 = crc32_le(~0, buf, reg->size);
175	if (crc32 != reg->checksum)
176		ret = -EIO;
177
 
178	kfree(buf);
179
180err_map_addr:
181	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
182
183err_addr:
184	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
185
186err:
187	return ret;
188}
189
190static int pci_epf_test_write(struct pci_epf_test *epf_test)
191{
192	int ret;
193	void __iomem *dst_addr;
194	void *buf;
 
195	phys_addr_t phys_addr;
 
 
196	struct pci_epf *epf = epf_test->epf;
197	struct device *dev = &epf->dev;
198	struct pci_epc *epc = epf->epc;
 
199	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
200	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
201
202	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
203	if (!dst_addr) {
204		dev_err(dev, "failed to allocate address\n");
205		reg->status = STATUS_DST_ADDR_INVALID;
206		ret = -ENOMEM;
207		goto err;
208	}
209
210	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
211			       reg->size);
212	if (ret) {
213		dev_err(dev, "failed to map address\n");
214		reg->status = STATUS_DST_ADDR_INVALID;
215		goto err_addr;
216	}
217
218	buf = kzalloc(reg->size, GFP_KERNEL);
219	if (!buf) {
220		ret = -ENOMEM;
221		goto err_map_addr;
222	}
223
224	get_random_bytes(buf, reg->size);
225	reg->checksum = crc32_le(~0, buf, reg->size);
226
227	memcpy(dst_addr, buf, reg->size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
229	/*
230	 * wait 1ms inorder for the write to complete. Without this delay L3
231	 * error in observed in the host system.
232	 */
233	mdelay(1);
234
 
235	kfree(buf);
236
237err_map_addr:
238	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
239
240err_addr:
241	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
242
243err:
244	return ret;
245}
246
247static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
 
248{
249	u8 msi_count;
250	struct pci_epf *epf = epf_test->epf;
 
251	struct pci_epc *epc = epf->epc;
252	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
253	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
254
255	reg->status |= STATUS_IRQ_RAISED;
256	msi_count = pci_epc_get_msi(epc, epf->func_no);
257	if (irq > msi_count || msi_count <= 0)
258		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
259	else
260		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
 
 
 
 
 
 
 
 
 
 
 
 
 
261}
262
/*
 * Deferred-work command loop: polls the command register in the test
 * register BAR, dispatches the requested operation, and re-queues itself
 * every millisecond. Only one command bit is handled per pass; the checks
 * are ordered, so e.g. WRITE wins over READ if both are set.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	u8 irq;
	u8 msi_count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before executing it. */
	reg->command = 0;
	reg->status = 0;

	/* MSI vector number requested by the host, encoded in the command. */
	irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, irq);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, irq);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, irq);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		/*
		 * NOTE(review): msi_count is u8, so "<= 0" only catches 0 and
		 * a negative return from pci_epc_get_msi() would wrap to a
		 * large count — confirm the EPC API cannot return an error
		 * here.
		 */
		msi_count = pci_epc_get_msi(epc, epf->func_no);
		if (irq > msi_count || msi_count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
		goto reset_handler;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
334
335static void pci_epf_test_linkup(struct pci_epf *epf)
336{
337	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
338
339	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
340			   msecs_to_jiffies(1));
341}
342
343static void pci_epf_test_unbind(struct pci_epf *epf)
344{
345	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
346	struct pci_epc *epc = epf->epc;
347	struct pci_epf_bar *epf_bar;
348	int bar;
349
350	cancel_delayed_work(&epf_test->cmd_handler);
351	pci_epc_stop(epc);
352	for (bar = BAR_0; bar <= BAR_5; bar++) {
353		epf_bar = &epf->bar[bar];
354
355		if (epf_test->reg[bar]) {
356			pci_epf_free_space(epf, epf_test->reg[bar], bar);
357			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
 
 
358		}
359	}
360}
361
362static int pci_epf_test_set_bar(struct pci_epf *epf)
363{
364	int bar;
365	int ret;
366	struct pci_epf_bar *epf_bar;
367	struct pci_epc *epc = epf->epc;
368	struct device *dev = &epf->dev;
369	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
370	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 
 
 
371
372	for (bar = BAR_0; bar <= BAR_5; bar++) {
373		epf_bar = &epf->bar[bar];
 
 
 
 
 
 
374
375		epf_bar->flags |= upper_32_bits(epf_bar->size) ?
376			PCI_BASE_ADDRESS_MEM_TYPE_64 :
377			PCI_BASE_ADDRESS_MEM_TYPE_32;
378
379		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
 
380		if (ret) {
381			pci_epf_free_space(epf, epf_test->reg[bar], bar);
382			dev_err(dev, "failed to set BAR%d\n", bar);
 
383			if (bar == test_reg_bar)
384				return ret;
385		}
386		/*
387		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
388		 * if the specific implementation required a 64-bit BAR,
389		 * even if we only requested a 32-bit BAR.
390		 */
391		if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
392			bar++;
393	}
394
395	return 0;
396}
397
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
398static int pci_epf_test_alloc_space(struct pci_epf *epf)
399{
400	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
401	struct device *dev = &epf->dev;
 
 
 
 
 
402	void *base;
403	int bar;
404	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
405
406	base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
407				   test_reg_bar);
408	if (!base) {
409		dev_err(dev, "failed to allocated register space\n");
410		return -ENOMEM;
411	}
412	epf_test->reg[test_reg_bar] = base;
413
414	for (bar = BAR_0; bar <= BAR_5; bar++) {
 
 
 
415		if (bar == test_reg_bar)
416			continue;
417		base = pci_epf_alloc_space(epf, bar_size[bar], bar);
 
 
 
 
 
 
418		if (!base)
419			dev_err(dev, "failed to allocate space for BAR%d\n",
420				bar);
421		epf_test->reg[bar] = base;
422	}
423
424	return 0;
425}
426
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Bind callback: the function has been attached to an endpoint controller.
 * Write the standard configuration header, allocate BAR backing memory,
 * program the BARs and configure MSI. If no link-up notifier is available,
 * start polling for host commands immediately.
 *
 * NOTE(review): if pci_epf_test_set_bar() or pci_epc_set_msi() fail, the
 * space allocated by pci_epf_test_alloc_space() is not released on this
 * path — confirm whether the framework invokes unbind after a failed bind.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
	if (ret)
		return ret;

	/* No link-up event will come; start the command handler right away. */
	if (!epf_test->linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
461
/* Device-ID table; entries are matched against the configfs function name. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
468
469static int pci_epf_test_probe(struct pci_epf *epf)
470{
471	struct pci_epf_test *epf_test;
472	struct device *dev = &epf->dev;
473	const struct pci_epf_device_id *match;
474	struct pci_epf_test_data *data;
475	enum pci_barno test_reg_bar = BAR_0;
476	bool linkup_notifier = true;
477
478	match = pci_epf_match_device(pci_epf_test_ids, epf);
479	data = (struct pci_epf_test_data *)match->driver_data;
480	if (data) {
481		test_reg_bar = data->test_reg_bar;
482		linkup_notifier = data->linkup_notifier;
483	}
484
485	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
486	if (!epf_test)
487		return -ENOMEM;
488
489	epf->header = &test_header;
490	epf_test->epf = epf;
491	epf_test->test_reg_bar = test_reg_bar;
492	epf_test->linkup_notifier = linkup_notifier;
493
494	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
495
496	epf_set_drvdata(epf, epf_test);
497	return 0;
498}
499
/* Endpoint function callbacks, including the link-up notification hook. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.linkup = pci_epf_test_linkup,
};
505
/* PCI endpoint function driver definition for the "pci_epf_test" function. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
513
514static int __init pci_epf_test_init(void)
515{
516	int ret;
517
518	kpcitest_workqueue = alloc_workqueue("kpcitest",
519					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 
 
 
 
 
520	ret = pci_epf_register_driver(&test_driver);
521	if (ret) {
522		pr_err("failed to register pci epf test driver --> %d\n", ret);
 
523		return ret;
524	}
525
526	return 0;
527}
528module_init(pci_epf_test_init);
529
530static void __exit pci_epf_test_exit(void)
531{
 
 
532	pci_epf_unregister_driver(&test_driver);
533}
534module_exit(pci_epf_test_exit);
535
536MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
537MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
538MODULE_LICENSE("GPL v2");