/*
 * Source listing: Linux v6.2 PCI endpoint test function driver
 * (drivers/pci/endpoint/functions/pci-epf-test.c). Scraped page
 * chrome removed.
 */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Test driver to test endpoint functionality
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/crc32.h>
  10#include <linux/delay.h>
  11#include <linux/dmaengine.h>
  12#include <linux/io.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/pci_ids.h>
  16#include <linux/random.h>
  17
  18#include <linux/pci-epc.h>
  19#include <linux/pci-epf.h>
  20#include <linux/pci_regs.h>
  21
  22#define IRQ_TYPE_LEGACY			0
  23#define IRQ_TYPE_MSI			1
  24#define IRQ_TYPE_MSIX			2
  25
  26#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
  27#define COMMAND_RAISE_MSI_IRQ		BIT(1)
  28#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
  29#define COMMAND_READ			BIT(3)
  30#define COMMAND_WRITE			BIT(4)
  31#define COMMAND_COPY			BIT(5)
  32
  33#define STATUS_READ_SUCCESS		BIT(0)
  34#define STATUS_READ_FAIL		BIT(1)
  35#define STATUS_WRITE_SUCCESS		BIT(2)
  36#define STATUS_WRITE_FAIL		BIT(3)
  37#define STATUS_COPY_SUCCESS		BIT(4)
  38#define STATUS_COPY_FAIL		BIT(5)
  39#define STATUS_IRQ_RAISED		BIT(6)
  40#define STATUS_SRC_ADDR_INVALID		BIT(7)
  41#define STATUS_DST_ADDR_INVALID		BIT(8)
  42
  43#define FLAG_USE_DMA			BIT(0)
  44
  45#define TIMER_RESOLUTION		1
  46
  47static struct workqueue_struct *kpcitest_workqueue;
  48
/**
 * struct pci_epf_test - per-function private state of the endpoint test driver
 * @reg: kernel virtual addresses of the per-BAR backing memory allocated in
 *	 pci_epf_test_alloc_space() (NULL for unused/reserved BARs)
 * @epf: the EPF device this state is bound to
 * @test_reg_bar: BAR that holds the struct pci_epf_test_reg control block
 * @msix_table_offset: offset of the MSI-X table within the test register BAR
 * @cmd_handler: self-requeueing delayed work that polls reg->command
 * @dma_chan_tx: DMA channel for DMA_MEM_TO_DEV (write) transfers
 * @dma_chan_rx: DMA channel for DMA_DEV_TO_MEM (read) transfers
 * @transfer_complete: completion signalled by the DMA callback
 * @dma_supported: cleared at bind time if no DMA channel could be acquired
 * @dma_private: true when dedicated slave channels (not memcpy) are in use
 * @epc_features: capabilities advertised by the underlying EPC driver
 */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};
  62
/*
 * Register block exposed to the host through the test BAR. The host writes
 * command/addresses/size/irq fields and reads status/checksum back, so the
 * exact layout is part of the host<->endpoint ABI — hence __packed.
 */
struct pci_epf_test_reg {
	u32	magic;
	u32	command;	/* COMMAND_* bit requested by the host */
	u32	status;		/* STATUS_* bits reported to the host */
	u64	src_addr;	/* PCI address of the host source buffer */
	u64	dst_addr;	/* PCI address of the host destination buffer */
	u32	size;		/* transfer length in bytes */
	u32	checksum;	/* crc32_le() over the transferred payload */
	u32	irq_type;	/* IRQ_TYPE_* used to signal completion */
	u32	irq_number;	/* interrupt vector for MSI/MSI-X */
	u32	flags;		/* FLAG_USE_DMA */
} __packed;
  75
/* Default config-space header: wildcard IDs, INTA as the interrupt pin. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
  82
/*
 * Default sizes in bytes for BAR0..BAR5; entries are overwritten with any
 * EPC-fixed sizes in pci_epf_configure_bar().
 */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
  84
/*
 * DMA engine completion callback: wakes the waiter blocked in
 * pci_epf_test_data_transfer().
 */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}
  91
  92/**
  93 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
  94 *				  data between PCIe EP and remote PCIe RC
  95 * @epf_test: the EPF test device that performs the data transfer operation
  96 * @dma_dst: The destination address of the data transfer. It can be a physical
  97 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
  98 * @dma_src: The source address of the data transfer. It can be a physical
  99 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 100 * @len: The size of the data transfer
 101 * @dma_remote: remote RC physical address
 102 * @dir: DMA transfer direction
 103 *
 104 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 105 * PCIe RC. The source and destination address can be a physical address given
 106 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 107 *
 108 * The function returns '0' on success and negative value on failure.
 109 */
 110static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
 111				      dma_addr_t dma_dst, dma_addr_t dma_src,
 112				      size_t len, dma_addr_t dma_remote,
 113				      enum dma_transfer_direction dir)
 114{
 115	struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
 116				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
 117	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
 118	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 119	struct pci_epf *epf = epf_test->epf;
 120	struct dma_async_tx_descriptor *tx;
 121	struct dma_slave_config sconf = {};
 122	struct device *dev = &epf->dev;
 123	dma_cookie_t cookie;
 124	int ret;
 125
 126	if (IS_ERR_OR_NULL(chan)) {
 127		dev_err(dev, "Invalid DMA memcpy channel\n");
 128		return -EINVAL;
 129	}
 130
 131	if (epf_test->dma_private) {
 132		sconf.direction = dir;
 133		if (dir == DMA_MEM_TO_DEV)
 134			sconf.dst_addr = dma_remote;
 135		else
 136			sconf.src_addr = dma_remote;
 137
 138		if (dmaengine_slave_config(chan, &sconf)) {
 139			dev_err(dev, "DMA slave config fail\n");
 140			return -EIO;
 141		}
 142		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
 143						 flags);
 144	} else {
 145		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
 146					       flags);
 147	}
 148
 149	if (!tx) {
 150		dev_err(dev, "Failed to prepare DMA memcpy\n");
 151		return -EIO;
 152	}
 153
 
 
 154	tx->callback = pci_epf_test_dma_callback;
 155	tx->callback_param = epf_test;
 156	cookie = tx->tx_submit(tx);
 157	reinit_completion(&epf_test->transfer_complete);
 158
 159	ret = dma_submit_error(cookie);
 160	if (ret) {
 161		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
 162		return -EIO;
 163	}
 164
 165	dma_async_issue_pending(chan);
 166	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
 167	if (ret < 0) {
 168		dmaengine_terminate_sync(chan);
 169		dev_err(dev, "DMA wait_for_completion_timeout\n");
 170		return -ETIMEDOUT;
 171	}
 172
 173	return 0;
 
 
 
 
 
 
 
 
 174}
 175
/*
 * Filter criteria handed to dma_request_channel(): the device that must own
 * the channel and a mask of required dma_transfer_direction bits.
 */
struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};
 180
 181static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
 182{
 183	struct epf_dma_filter *filter = node;
 184	struct dma_slave_caps caps;
 185
 186	memset(&caps, 0, sizeof(caps));
 187	dma_get_slave_caps(chan, &caps);
 188
 189	return chan->device->dev == filter->dev
 190		&& (filter->dma_mask & caps.directions);
 191}
 192
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel. First tries to grab a pair of
 * dedicated slave channels on the EPC's parent device (rx for DMA_DEV_TO_MEM,
 * tx for DMA_MEM_TO_DEV); if either is unavailable, falls back to a single
 * generic DMA_MEMCPY channel shared by both directions.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* Only accept channels that live on the EPC's parent device. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/*
	 * NOTE(review): dma_chan_rx still points at the just-released channel
	 * here. Both pointers are overwritten below once the generic memcpy
	 * channel is acquired; if that request fails, the caller clears
	 * dma_supported so the stale pointer is never dereferenced — but it
	 * would be tidier to NULL it here as well.
	 */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_tx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		/* -EPROBE_DEFER is routine; don't log an error for it. */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	/* One memcpy channel serves both directions. */
	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
 257
 258/**
 259 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 260 * @epf_test: the EPF test device that performs data transfer operation
 261 *
 262 * Helper to cleanup EPF test DMA channel.
 263 */
 264static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
 265{
 266	if (!epf_test->dma_supported)
 267		return;
 268
 269	dma_release_channel(epf_test->dma_chan_tx);
 270	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
 271		epf_test->dma_chan_tx = NULL;
 272		epf_test->dma_chan_rx = NULL;
 273		return;
 274	}
 275
 276	dma_release_channel(epf_test->dma_chan_rx);
 277	epf_test->dma_chan_rx = NULL;
 278
 279	return;
 280}
 281
/*
 * Log size, elapsed time and throughput of a completed transfer.
 *
 * Both the scaled size (in 'rate') and the elapsed nanoseconds are halved
 * together until 'ns' fits in 32 bits, because do_div() divides a u64 by a
 * u32 in place. NOTE(review): if the elapsed time rounds to 0 ns (timer
 * resolution too coarse) the function returns without printing anything.
 */
static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts;
	u64 rate, ns;

	ts = timespec64_sub(*end, *start);

	/* convert both size (stored in 'rate') and time in terms of 'ns' */
	ns = timespec64_to_ns(&ts);
	rate = size * NSEC_PER_SEC;

	/* Divide both size (stored in 'rate') and ns by a common factor */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return;

	/* calculate the rate */
	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}
 311
/*
 * COMMAND_COPY handler: map both the host-provided source and destination
 * windows, then copy reg->size bytes from one to the other — either through
 * the memcpy DMA channel or via a bounce buffer with MMIO accessors.
 * Failures are mirrored into reg->status for the host; returns 0 on success
 * or a negative errno.
 */
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Local window onto the host buffer holding the data to copy. */
	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	/* Local window onto the host buffer receiving the copy. */
	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		/* Private slave channels cannot do MEM_TO_MEM copies. */
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* Bounce through RAM: memcpy_fromio/_toio need a plain buffer. */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}
 410
/*
 * COMMAND_READ handler: read reg->size bytes from the host buffer at
 * reg->src_addr into a local buffer (via the rx DMA channel or
 * memcpy_fromio()) and verify them against the CRC32 the host stored in
 * reg->checksum. Returns 0 on success, negative errno otherwise.
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Local window onto the host source buffer. */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* Map the local buffer for the EPC's parent (the DMA master). */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	/* Integrity check against the checksum published by the host. */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
 500
/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish its
 * CRC32 in reg->checksum, then write reg->size bytes to the host buffer at
 * reg->dst_addr via the tx DMA channel or memcpy_toio(). Returns 0 on
 * success, negative errno otherwise.
 */
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Local window onto the host destination buffer. */
	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Random payload; the host verifies it against this checksum. */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* Map the local buffer for the EPC's parent (the DMA master). */
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}
 596
 597static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
 598				   u16 irq)
 599{
 600	struct pci_epf *epf = epf_test->epf;
 601	struct device *dev = &epf->dev;
 602	struct pci_epc *epc = epf->epc;
 603	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 604	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
 605
 606	reg->status |= STATUS_IRQ_RAISED;
 
 
 
 
 607
 608	switch (irq_type) {
 609	case IRQ_TYPE_LEGACY:
 610		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 611				  PCI_EPC_IRQ_LEGACY, 0);
 612		break;
 613	case IRQ_TYPE_MSI:
 
 
 
 
 
 
 614		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 615				  PCI_EPC_IRQ_MSI, irq);
 616		break;
 617	case IRQ_TYPE_MSIX:
 
 
 
 
 
 
 618		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 619				  PCI_EPC_IRQ_MSIX, irq);
 620		break;
 621	default:
 622		dev_err(dev, "Failed to raise IRQ, unknown type\n");
 623		break;
 624	}
 625}
 626
/*
 * Delayed-work command poller: decodes reg->command written by the host,
 * dispatches the matching test operation, raises the requested completion
 * interrupt, then requeues itself with ~1 ms delay. reg->command is cleared
 * before handling so the host can post the next command.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		/* Reject vector numbers beyond what the host enabled. */
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSI, reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		/* Reject vector numbers beyond what the host enabled. */
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSIX, reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
 716
 717static void pci_epf_test_unbind(struct pci_epf *epf)
 718{
 719	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 720	struct pci_epc *epc = epf->epc;
 721	struct pci_epf_bar *epf_bar;
 722	int bar;
 723
 724	cancel_delayed_work(&epf_test->cmd_handler);
 725	pci_epf_test_clean_dma_chan(epf_test);
 726	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 727		epf_bar = &epf->bar[bar];
 728
 729		if (epf_test->reg[bar]) {
 730			pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
 731					  epf_bar);
 732			pci_epf_free_space(epf, epf_test->reg[bar], bar,
 733					   PRIMARY_INTERFACE);
 734		}
 735	}
 736}
 737
 738static int pci_epf_test_set_bar(struct pci_epf *epf)
 739{
 740	int bar, add;
 741	int ret;
 742	struct pci_epf_bar *epf_bar;
 743	struct pci_epc *epc = epf->epc;
 744	struct device *dev = &epf->dev;
 745	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 746	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 747	const struct pci_epc_features *epc_features;
 748
 749	epc_features = epf_test->epc_features;
 750
 751	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
 752		epf_bar = &epf->bar[bar];
 753		/*
 754		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
 755		 * if the specific implementation required a 64-bit BAR,
 756		 * even if we only requested a 32-bit BAR.
 757		 */
 758		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
 759
 760		if (!!(epc_features->reserved_bar & (1 << bar)))
 761			continue;
 762
 763		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
 764				      epf_bar);
 765		if (ret) {
 766			pci_epf_free_space(epf, epf_test->reg[bar], bar,
 767					   PRIMARY_INTERFACE);
 768			dev_err(dev, "Failed to set BAR%d\n", bar);
 769			if (bar == test_reg_bar)
 770				return ret;
 771		}
 772	}
 773
 774	return 0;
 775}
 776
/*
 * Controller-side initialization: write the config-space header, program the
 * BARs and configure MSI/MSI-X according to the EPC's advertised
 * capabilities. Invoked either directly from bind or from the CORE_INIT
 * notifier, depending on the controller.
 */
static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	/*
	 * Header is written only when vfunc_no <= 1 — presumably the PF and
	 * first VF case; confirm virtual-function numbering convention.
	 */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}
 828
 829static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
 830				 void *data)
 831{
 832	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
 833	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 834	int ret;
 835
 836	switch (val) {
 837	case CORE_INIT:
 838		ret = pci_epf_test_core_init(epf);
 839		if (ret)
 840			return NOTIFY_BAD;
 841		break;
 842
 843	case LINK_UP:
 844		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
 845				   msecs_to_jiffies(1));
 846		break;
 847
 848	default:
 849		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
 850		return NOTIFY_BAD;
 851	}
 852
 853	return NOTIFY_OK;
 854}
 855
 
 
 
 
 
/*
 * Allocate backing memory for every usable BAR. The test register BAR holds,
 * in order: the pci_epf_test_reg block (aligned to 128 bytes), then — when
 * the EPC is MSI-X capable — the MSI-X vector table and pending bit array
 * (PBA). The remaining BARs are plain data buffers of bar_size[] bytes.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	/* A fixed-size BAR must still fit everything computed above. */
	if (epc_features->bar_fixed_size[test_reg_bar]) {
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	/* 64-bit BARs occupy two slots, hence the variable stride. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		/* Data BAR failures are non-fatal; reg[bar] stays NULL. */
		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align,
					   PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}
 919
 920static void pci_epf_configure_bar(struct pci_epf *epf,
 921				  const struct pci_epc_features *epc_features)
 922{
 923	struct pci_epf_bar *epf_bar;
 924	bool bar_fixed_64bit;
 925	int i;
 926
 927	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 928		epf_bar = &epf->bar[i];
 929		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
 930		if (bar_fixed_64bit)
 931			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
 932		if (epc_features->bar_fixed_size[i])
 933			bar_size[i] = epc_features->bar_fixed_size[i];
 934	}
 935}
 936
/*
 * Bind callback: query EPC features, pick the first free BAR for the test
 * registers, allocate BAR memory, run core init (unless the controller
 * signals CORE_INIT via notifier), set up DMA, and start the command poller
 * either immediately or on LINK_UP.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	linkup_notifier = epc_features->linkup_notifier;
	core_init_notifier = epc_features->core_init_notifier;
	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;
	pci_epf_configure_bar(epf, epc_features);

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	/* No CORE_INIT event from this controller: initialize right away. */
	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	/* DMA is optional; MMIO transfers remain available without it. */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (linkup_notifier || core_init_notifier) {
		epf->nb.notifier_call = pci_epf_test_notifier;
		pci_epc_register_notifier(epc, &epf->nb);
	} else {
		/* No link-up event either: start polling immediately. */
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
	}

	return 0;
}
 991
/* Device ID table: the endpoint core matches function devices by name. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
 998
 999static int pci_epf_test_probe(struct pci_epf *epf)
 
1000{
1001	struct pci_epf_test *epf_test;
1002	struct device *dev = &epf->dev;
1003
1004	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1005	if (!epf_test)
1006		return -ENOMEM;
1007
1008	epf->header = &test_header;
1009	epf_test->epf = epf;
1010
1011	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1012
 
 
1013	epf_set_drvdata(epf, epf_test);
1014	return 0;
1015}
1016
/* EPF callbacks invoked by the endpoint core on (un)bind. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};
1021
/* EPF driver registration data; probe allocates the per-function state. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
1029
1030static int __init pci_epf_test_init(void)
1031{
1032	int ret;
1033
1034	kpcitest_workqueue = alloc_workqueue("kpcitest",
1035					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1036	if (!kpcitest_workqueue) {
1037		pr_err("Failed to allocate the kpcitest work queue\n");
1038		return -ENOMEM;
1039	}
1040
1041	ret = pci_epf_register_driver(&test_driver);
1042	if (ret) {
1043		destroy_workqueue(kpcitest_workqueue);
1044		pr_err("Failed to register pci epf test driver --> %d\n", ret);
1045		return ret;
1046	}
1047
1048	return 0;
1049}
1050module_init(pci_epf_test_init);
1051
1052static void __exit pci_epf_test_exit(void)
1053{
1054	if (kpcitest_workqueue)
1055		destroy_workqueue(kpcitest_workqueue);
1056	pci_epf_unregister_driver(&test_driver);
1057}
1058module_exit(pci_epf_test_exit);
1059
1060MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1061MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1062MODULE_LICENSE("GPL v2");
/* Linux v6.8 version of the same driver follows (partial listing). */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Test driver to test endpoint functionality
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/crc32.h>
  10#include <linux/delay.h>
  11#include <linux/dmaengine.h>
  12#include <linux/io.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/pci_ids.h>
  16#include <linux/random.h>
  17
  18#include <linux/pci-epc.h>
  19#include <linux/pci-epf.h>
  20#include <linux/pci_regs.h>
  21
/* IRQ type selectors the host writes to the irq_type register. */
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Commands the host writes to the command register. */
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

/* Status bits reported back to the host through the status register. */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

/* flags bit: the host requests DMA instead of CPU memcpy_to/fromio(). */
#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

/* Workqueue that runs the 1 ms command-polling work for all test functions. */
static struct workqueue_struct *kpcitest_workqueue;
  48
/* Per-function state for the PCI endpoint test function driver. */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* kernel-side address of each BAR's backing memory */
	struct pci_epf		*epf;			/* the endpoint function this state belongs to */
	enum pci_barno		test_reg_bar;		/* BAR exposing struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* offset of the MSI-X table inside test_reg_bar */
	struct delayed_work	cmd_handler;		/* re-arming work that polls reg->command */
	struct dma_chan		*dma_chan_tx;		/* MEM_TO_DEV channel (or shared memcpy channel) */
	struct dma_chan		*dma_chan_rx;		/* DEV_TO_MEM channel (or shared memcpy channel) */
	struct dma_chan		*transfer_chan;		/* channel used by the in-flight transfer */
	dma_cookie_t		transfer_cookie;	/* cookie of the in-flight transfer */
	enum dma_status		transfer_status;	/* terminal status latched by the DMA callback */
	struct completion	transfer_complete;	/* signalled by pci_epf_test_dma_callback() */
	bool			dma_supported;		/* false when no DMA channel could be acquired */
	bool			dma_private;		/* true when using dedicated slave channels */
	const struct pci_epc_features *epc_features;	/* capabilities of the underlying controller */
};
  65
/*
 * Register layout exposed to the host through the test BAR.  The host-side
 * test driver reads and writes these fields directly, so the layout must
 * remain packed and stable.
 */
struct pci_epf_test_reg {
	u32	magic;
	u32	command;	/* COMMAND_* bit; cleared by the EP once accepted */
	u32	status;		/* STATUS_* bits reported back to the host */
	u64	src_addr;	/* host-side source bus address */
	u64	dst_addr;	/* host-side destination bus address */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* crc32_le() over the transferred payload */
	u32	irq_type;	/* IRQ_TYPE_* */
	u32	irq_number;	/* 1-based MSI/MSI-X vector to raise */
	u32	flags;		/* FLAG_USE_DMA */
} __packed;
  78
/* Default configuration-space header written for the test function. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default per-BAR allocation sizes; overridden by any fixed size the EPC reports. */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
  87
/*
 * DMA completion callback: latch the final transfer status and wake up
 * pci_epf_test_data_transfer().  The completion is only signalled for a
 * terminal state (DMA_COMPLETE or DMA_ERROR), never for an intermediate one.
 */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}
 100
/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function blocks until the transfer reaches a terminal state (or the
 * wait is interrupted) and always quiesces the channel before returning.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/* Private slave channels are direction specific; pick accordingly. */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	/* Local (EP-side) end of the transfer as used by the slave config. */
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/* Dedicated slave channel: program the remote bus address. */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		/* Generic channel: plain device-to-device memcpy. */
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/*
	 * Record channel and cookie before issuing the transfer so the
	 * completion callback can query the status of this descriptor.
	 */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Quiesce the channel on success and on every error path above. */
	dmaengine_terminate_sync(chan);

	return ret;
}
 191
/* Match criteria passed to epf_dma_filter_fn() via dma_request_channel(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device the channel must belong to */
	u32 dma_mask;		/* required BIT(dma_transfer_direction) bits */
};
 196
 197static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
 198{
 199	struct epf_dma_filter *filter = node;
 200	struct dma_slave_caps caps;
 201
 202	memset(&caps, 0, sizeof(caps));
 203	dma_get_slave_caps(chan, &caps);
 204
 205	return chan->device->dev == filter->dev
 206		&& (filter->dma_mask & caps.directions);
 207}
 208
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.  First tries to acquire a
 * pair of dedicated (private) slave channels from the EPC's parent device,
 * one per direction; if that fails, falls back to a single generic memcpy
 * channel shared by both directions.
 *
 * Returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* Prefer private channels owned by the EPC's parent device. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/* Got rx but not tx: drop rx and fall back to a generic channel. */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_tx = NULL;

fail_back_tx:
	/* Generic fallback: one memcpy-capable channel for both directions. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
 273
 274/**
 275 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 276 * @epf_test: the EPF test device that performs data transfer operation
 277 *
 278 * Helper to cleanup EPF test DMA channel.
 279 */
 280static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
 281{
 282	if (!epf_test->dma_supported)
 283		return;
 284
 285	dma_release_channel(epf_test->dma_chan_tx);
 286	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
 287		epf_test->dma_chan_tx = NULL;
 288		epf_test->dma_chan_rx = NULL;
 289		return;
 290	}
 291
 292	dma_release_channel(epf_test->dma_chan_rx);
 293	epf_test->dma_chan_rx = NULL;
 294
 295	return;
 296}
 297
/*
 * Log the size, elapsed time and throughput of a completed READ/WRITE/COPY
 * operation.  Rate is reported in KB/s (size scaled by NSEC_PER_SEC, then
 * divided by ns * 1000).
 *
 * NOTE(review): size * NSEC_PER_SEC is a u64 product and would wrap for
 * transfers above ~18 GB — fine for BAR-bounded test sizes, but worth
 * confirming if sizes ever grow.
 */
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* calculate the rate */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}
 316
/*
 * Handle COMMAND_COPY: copy reg->size bytes from the host address
 * reg->src_addr to reg->dst_addr, either through the generic DMA memcpy
 * channel or via a bounce buffer and CPU memcpy_fromio()/memcpy_toio().
 * Resources are released through the goto cleanup chain; the final status
 * (STATUS_COPY_SUCCESS/FAIL) is derived from ret at the bottom.
 */
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	if (reg->flags & FLAG_USE_DMA) {
		/* Private slave channels cannot do MEM_TO_MEM copies. */
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* CPU path: bounce through a kernel buffer. */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}
 410
/*
 * Handle COMMAND_READ: read reg->size bytes from the host at reg->src_addr
 * into a local buffer (via DMA or CPU memcpy_fromio()), then verify them
 * against the host-provided CRC32.  "READ" is from the host's perspective:
 * data flows host -> endpoint.
 */
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	if (reg->flags & FLAG_USE_DMA) {
		/* Map the local buffer for the device to write into. */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/* Verify the payload against the checksum the host wrote. */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}
 495
/*
 * Handle COMMAND_WRITE: fill a local buffer with random bytes, publish its
 * CRC32 in reg->checksum, then write the buffer to the host at
 * reg->dst_addr (via DMA or CPU memcpy_toio()).  "WRITE" is from the
 * host's perspective: data flows endpoint -> host.
 */
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Publish the checksum so the host can verify what it receives. */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	if (reg->flags & FLAG_USE_DMA) {
		/* Map the local buffer for the device to read from. */
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/*
	 * Wait 1 ms in order for the write to complete.  Without this delay
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}
 586
/*
 * Raise the interrupt the host asked for (INTx, MSI or MSI-X) after
 * setting STATUS_IRQ_RAISED.  MSI/MSI-X vector numbers are validated
 * against the count currently configured on the controller; an invalid
 * number means no IRQ is raised at all.
 */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = reg->status | STATUS_IRQ_RAISED;
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	WRITE_ONCE(reg->status, status);

	switch (reg->irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, reg->irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, reg->irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}
 632
/*
 * Delayed work run on kpcitest_workqueue: poll the command register the
 * host writes through the test BAR, execute the requested operation, and
 * re-arm itself with a 1 ms delay.  Started from link-up (or directly from
 * bind when no link-up notifier exists) and cancelled in unbind.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = READ_ONCE(reg->command);
	if (!command)
		goto reset_handler;

	/* Acknowledge the command and clear any stale status bits. */
	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	/* Re-arm the poll regardless of the outcome. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
 688
/*
 * Unbind callback: stop the command poller, release DMA channels, and tear
 * down every BAR that was set up (clear it on the controller and free its
 * backing space).
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar;
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		epf_bar = &epf->bar[bar];

		/* reg[bar] is only non-NULL for BARs that were allocated. */
		if (epf_test->reg[bar]) {
			pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
					  epf_bar);
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
		}
	}
}
 709
/*
 * Program every allocated BAR into the endpoint controller.  A failure is
 * only fatal for the test-register BAR; other BARs are freed and skipped.
 * Returns 0 on success or the error from setting the test-register BAR.
 */
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 * A 64-bit BAR consumes two BAR slots, hence add = 2.
		 */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}
 748
/*
 * Controller-init callback: write the configuration header (physical
 * functions only), program the BARs, and configure MSI/MSI-X according to
 * the controller's advertised capabilities.  Returns 0 on success or a
 * negative error code.
 */
static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	/* Virtual functions (vfunc_no > 1) do not get their own header. */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}
 800
/*
 * Link-up callback: the host is now reachable, so start (or restart) the
 * command-polling work with a 1 ms delay.
 */
static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}
 810
/* EPC event callbacks: controller init and link-up notifications. */
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.core_init = pci_epf_test_core_init,
	.link_up = pci_epf_test_link_up,
};
 815
/*
 * Allocate backing memory for every usable BAR.  The test-register BAR
 * must also fit the MSI-X table and PBA (when MSI-X capable); for the
 * other BARs an allocation failure is logged but not fatal.  Returns 0 on
 * success, -ENOMEM if the test-register BAR cannot be sized or allocated.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		/* MSI-X table lives in the test BAR right after the registers. */
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		/* bar_size[] already holds the fixed size (pci_epf_configure_bar). */
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/* A 64-bit BAR consumes two BAR slots. */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align,
					   PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}
 879
 880static void pci_epf_configure_bar(struct pci_epf *epf,
 881				  const struct pci_epc_features *epc_features)
 882{
 883	struct pci_epf_bar *epf_bar;
 884	bool bar_fixed_64bit;
 885	int i;
 886
 887	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 888		epf_bar = &epf->bar[i];
 889		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
 890		if (bar_fixed_64bit)
 891			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
 892		if (epc_features->bar_fixed_size[i])
 893			bar_size[i] = epc_features->bar_fixed_size[i];
 894	}
 895}
 896
/*
 * Bind callback: query the controller's features, pick the first free BAR
 * for the test registers, allocate BAR space, run core init directly when
 * the controller has no core_init notifier, set up DMA, and start the
 * command poller immediately when no link-up notifier exists either.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	linkup_notifier = epc_features->linkup_notifier;
	core_init_notifier = epc_features->core_init_notifier;
	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;
	pci_epf_configure_bar(epf, epc_features);

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	/* No notifier: the controller is ready now, init immediately. */
	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	/* DMA is best-effort: fall back to CPU copies when unavailable. */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (!linkup_notifier && !core_init_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
 947
/* Device-ID table: matched against function names created via configfs. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
 954
/*
 * Probe callback: allocate per-function state, install the default header
 * and event callbacks, and initialize (but do not start) the command
 * poller.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}
 975
/* EPF operations invoked by the EPC core when the function is (un)bound. */
static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

/* Endpoint-function driver registration record. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
 988
/*
 * Module init: create the command-polling workqueue, then register the
 * endpoint-function driver.  The workqueue is destroyed again if driver
 * registration fails.
 */
static int __init pci_epf_test_init(void)
{
	int ret;

	/* WQ_MEM_RECLAIM | WQ_HIGHPRI: keep command polling responsive. */
	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
1009module_init(pci_epf_test_init);
1010
/*
 * Module exit: flush and destroy the workqueue, then unregister the driver.
 *
 * NOTE(review): the workqueue is destroyed before the driver is
 * unregistered, while unbind (run during unregister) still calls
 * cancel_delayed_work() on work that was queued to it — confirm this
 * ordering is intentional.
 */
static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
1018
1019MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1020MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1021MODULE_LICENSE("GPL v2");