Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/**
  3 * Test driver to test endpoint functionality
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/crc32.h>
 10#include <linux/delay.h>
 
 11#include <linux/io.h>
 12#include <linux/module.h>
 13#include <linux/slab.h>
 14#include <linux/pci_ids.h>
 15#include <linux/random.h>
 16
 17#include <linux/pci-epc.h>
 18#include <linux/pci-epf.h>
 19#include <linux/pci_regs.h>
 20
 21#define IRQ_TYPE_LEGACY			0
 22#define IRQ_TYPE_MSI			1
 23#define IRQ_TYPE_MSIX			2
 24
 25#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
 26#define COMMAND_RAISE_MSI_IRQ		BIT(1)
 27#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
 28#define COMMAND_READ			BIT(3)
 29#define COMMAND_WRITE			BIT(4)
 30#define COMMAND_COPY			BIT(5)
 31
 32#define STATUS_READ_SUCCESS		BIT(0)
 33#define STATUS_READ_FAIL		BIT(1)
 34#define STATUS_WRITE_SUCCESS		BIT(2)
 35#define STATUS_WRITE_FAIL		BIT(3)
 36#define STATUS_COPY_SUCCESS		BIT(4)
 37#define STATUS_COPY_FAIL		BIT(5)
 38#define STATUS_IRQ_RAISED		BIT(6)
 39#define STATUS_SRC_ADDR_INVALID		BIT(7)
 40#define STATUS_DST_ADDR_INVALID		BIT(8)
 41
 
 
 42#define TIMER_RESOLUTION		1
 43
 44static struct workqueue_struct *kpcitest_workqueue;
 45
/* Per-function driver state for the endpoint test function. */
struct pci_epf_test {
	void			*reg[6];	/* kernel virtual address backing each BAR */
	struct pci_epf		*epf;		/* associated endpoint function device */
	enum pci_barno		test_reg_bar;	/* BAR that holds struct pci_epf_test_reg */
	struct delayed_work	cmd_handler;	/* polls reg->command; requeues itself */
	const struct pci_epc_features *epc_features; /* features of the parent controller */
};
 53
/*
 * Control/status register block exposed to the host through the test BAR.
 * Layout must stay in sync with the host-side test driver — hence __packed.
 */
struct pci_epf_test_reg {
	u32	magic;		/* magic value; not consumed in this file — presumably checked by the host */
	u32	command;	/* COMMAND_* bit requested by the host */
	u32	status;		/* STATUS_* bits reported back to the host */
	u64	src_addr;	/* host source address (READ/COPY) */
	u64	dst_addr;	/* host destination address (WRITE/COPY) */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* CRC32 of the payload (host-written for READ, device-written for WRITE) */
	u32	irq_type;	/* IRQ_TYPE_* to raise on completion */
	u32	irq_number;	/* MSI/MSI-X vector number */
} __packed;
 65
/* Default configuration-space header; vendor/device IDs are left as wildcards. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/*
 * Default size of each BAR (BAR_0..BAR_5).  Entries may be overwritten with
 * the controller's fixed BAR sizes in pci_epf_configure_bar().
 */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
 74
 75static int pci_epf_test_copy(struct pci_epf_test *epf_test)
 76{
 77	int ret;
 78	void __iomem *src_addr;
 79	void __iomem *dst_addr;
 80	phys_addr_t src_phys_addr;
 81	phys_addr_t dst_phys_addr;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 82	struct pci_epf *epf = epf_test->epf;
 
 
 83	struct device *dev = &epf->dev;
 84	struct pci_epc *epc = epf->epc;
 85	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 86	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
 87
 88	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
 89	if (!src_addr) {
 90		dev_err(dev, "Failed to allocate source address\n");
 91		reg->status = STATUS_SRC_ADDR_INVALID;
 92		ret = -ENOMEM;
 93		goto err;
 94	}
 95
 96	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
 97			       reg->size);
 98	if (ret) {
 99		dev_err(dev, "Failed to map source address\n");
100		reg->status = STATUS_SRC_ADDR_INVALID;
101		goto err_src_addr;
 
 
 
 
 
 
 
 
 
 
102	}
103
104	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
105	if (!dst_addr) {
106		dev_err(dev, "Failed to allocate destination address\n");
107		reg->status = STATUS_DST_ADDR_INVALID;
108		ret = -ENOMEM;
109		goto err_src_map_addr;
110	}
111
112	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
113			       reg->size);
 
 
 
 
 
114	if (ret) {
115		dev_err(dev, "Failed to map destination address\n");
116		reg->status = STATUS_DST_ADDR_INVALID;
117		goto err_dst_addr;
 
 
 
 
 
 
 
 
 
 
 
118	}
119
120	memcpy(dst_addr, src_addr, reg->size);
 
121
122	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
 
123
124err_dst_addr:
125	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
 
 
126
127err_src_map_addr:
128	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
 
 
129
130err_src_addr:
131	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
132
133err:
134	return ret;
135}
136
137static int pci_epf_test_read(struct pci_epf_test *epf_test)
 
 
 
 
 
 
138{
139	int ret;
140	void __iomem *src_addr;
141	void *buf;
142	u32 crc32;
143	phys_addr_t phys_addr;
144	struct pci_epf *epf = epf_test->epf;
145	struct device *dev = &epf->dev;
146	struct pci_epc *epc = epf->epc;
147	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
148	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
 
149
150	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
151	if (!src_addr) {
152		dev_err(dev, "Failed to allocate address\n");
153		reg->status = STATUS_SRC_ADDR_INVALID;
154		ret = -ENOMEM;
155		goto err;
 
 
 
156	}
157
158	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
159			       reg->size);
160	if (ret) {
161		dev_err(dev, "Failed to map address\n");
162		reg->status = STATUS_SRC_ADDR_INVALID;
163		goto err_addr;
 
 
164	}
165
166	buf = kzalloc(reg->size, GFP_KERNEL);
167	if (!buf) {
168		ret = -ENOMEM;
169		goto err_map_addr;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170	}
 
171
172	memcpy_fromio(buf, src_addr, reg->size);
173
174	crc32 = crc32_le(~0, buf, reg->size);
175	if (crc32 != reg->checksum)
176		ret = -EIO;
 
 
 
 
 
 
 
 
 
 
177
178	kfree(buf);
 
 
 
 
 
179
180err_map_addr:
181	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
 
182
183err_addr:
184	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
 
 
 
 
 
185
186err:
187	return ret;
 
 
 
 
 
 
 
188}
189
190static int pci_epf_test_write(struct pci_epf_test *epf_test)
 
191{
192	int ret;
193	void __iomem *dst_addr;
194	void *buf;
195	phys_addr_t phys_addr;
196	struct pci_epf *epf = epf_test->epf;
 
197	struct device *dev = &epf->dev;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198	struct pci_epc *epc = epf->epc;
199	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
200	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
 
 
 
201
202	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
203	if (!dst_addr) {
204		dev_err(dev, "Failed to allocate address\n");
205		reg->status = STATUS_DST_ADDR_INVALID;
206		ret = -ENOMEM;
207		goto err;
208	}
 
209
210	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
211			       reg->size);
212	if (ret) {
213		dev_err(dev, "Failed to map address\n");
214		reg->status = STATUS_DST_ADDR_INVALID;
215		goto err_addr;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216	}
217
218	buf = kzalloc(reg->size, GFP_KERNEL);
219	if (!buf) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220		ret = -ENOMEM;
221		goto err_map_addr;
222	}
 
 
 
 
 
 
 
 
 
 
 
 
223
224	get_random_bytes(buf, reg->size);
225	reg->checksum = crc32_le(~0, buf, reg->size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
227	memcpy_toio(dst_addr, buf, reg->size);
 
228
229	/*
230	 * wait 1ms inorder for the write to complete. Without this delay L3
231	 * error in observed in the host system.
232	 */
233	usleep_range(1000, 2000);
234
235	kfree(buf);
236
237err_map_addr:
238	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
239
240err_addr:
241	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
242
243err:
244	return ret;
 
 
245}
246
247static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
248				   u16 irq)
249{
250	struct pci_epf *epf = epf_test->epf;
251	struct device *dev = &epf->dev;
252	struct pci_epc *epc = epf->epc;
253	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
254	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
255
256	reg->status |= STATUS_IRQ_RAISED;
 
 
 
 
257
258	switch (irq_type) {
259	case IRQ_TYPE_LEGACY:
260		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
 
261		break;
262	case IRQ_TYPE_MSI:
263		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
 
 
 
 
 
 
 
264		break;
265	case IRQ_TYPE_MSIX:
266		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
 
 
 
 
 
 
 
267		break;
268	default:
269		dev_err(dev, "Failed to raise IRQ, unknown type\n");
270		break;
271	}
272}
273
/*
 * Delayed-work handler, requeued every millisecond: read the command the
 * host wrote into the test register BAR, perform the requested operation
 * (read/write/copy or raise an IRQ), update reg->status and poll again.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	/* Consume the command and start from a clean status. */
	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		/* Silently ignore vectors beyond what was configured. */
		count = pci_epc_get_msi(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
				  reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		/* Same bounds check as the MSI case, for MSI-X vectors. */
		count = pci_epc_get_msix(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				  reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	/* Poll again in ~1ms; stopped from pci_epf_test_unbind(). */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
362
/*
 * .linkup callback: the controller reported link-up, so start the
 * command-handler polling loop.
 */
static void pci_epf_test_linkup(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
370
371static void pci_epf_test_unbind(struct pci_epf *epf)
372{
373	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
374	struct pci_epc *epc = epf->epc;
375	struct pci_epf_bar *epf_bar;
376	int bar;
377
378	cancel_delayed_work(&epf_test->cmd_handler);
379	pci_epc_stop(epc);
380	for (bar = BAR_0; bar <= BAR_5; bar++) {
381		epf_bar = &epf->bar[bar];
382
383		if (epf_test->reg[bar]) {
384			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
385			pci_epf_free_space(epf, epf_test->reg[bar], bar);
386		}
387	}
388}
389
390static int pci_epf_test_set_bar(struct pci_epf *epf)
391{
392	int bar, add;
393	int ret;
394	struct pci_epf_bar *epf_bar;
395	struct pci_epc *epc = epf->epc;
396	struct device *dev = &epf->dev;
397	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
398	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
399	const struct pci_epc_features *epc_features;
 
400
401	epc_features = epf_test->epc_features;
 
 
402
403	for (bar = BAR_0; bar <= BAR_5; bar += add) {
404		epf_bar = &epf->bar[bar];
405		/*
406		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
407		 * if the specific implementation required a 64-bit BAR,
408		 * even if we only requested a 32-bit BAR.
409		 */
410		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
411
412		if (!!(epc_features->reserved_bar & (1 << bar)))
413			continue;
 
414
415		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
 
 
416		if (ret) {
417			pci_epf_free_space(epf, epf_test->reg[bar], bar);
418			dev_err(dev, "Failed to set BAR%d\n", bar);
419			if (bar == test_reg_bar)
420				return ret;
421		}
422	}
423
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
424	return 0;
425}
426
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Allocate backing memory for every usable BAR: the test register BAR gets
 * space for struct pci_epf_test_reg (or the controller's fixed size), the
 * remaining BARs get the sizes recorded in bar_size[].  Failure to allocate
 * a non-register BAR is reported but not fatal.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	/*
	 * When the controller fixes the size of the test BAR, bar_size[] was
	 * already updated to that size by pci_epf_configure_bar().
	 */
	if (epc_features->bar_fixed_size[test_reg_bar])
		test_reg_size = bar_size[test_reg_bar];
	else
		test_reg_size = sizeof(struct pci_epf_test_reg);

	base = pci_epf_alloc_space(epf, test_reg_size,
				   test_reg_bar, epc_features->align);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar <= BAR_5; bar += add) {
		epf_bar = &epf->bar[bar];
		/* A 64-bit BAR consumes the following BAR slot as well. */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		/* May store NULL; later code checks epf_test->reg[bar]. */
		epf_test->reg[bar] = base;
	}

	return 0;
}
473
474static void pci_epf_configure_bar(struct pci_epf *epf,
475				  const struct pci_epc_features *epc_features)
476{
477	struct pci_epf_bar *epf_bar;
478	bool bar_fixed_64bit;
479	int i;
480
481	for (i = BAR_0; i <= BAR_5; i++) {
482		epf_bar = &epf->bar[i];
483		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
484		if (bar_fixed_64bit)
485			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
486		if (epc_features->bar_fixed_size[i])
487			bar_size[i] = epc_features->bar_fixed_size[i];
488	}
489}
490
/*
 * .bind callback: query the controller's features, write the configuration
 * header, allocate and program the BARs, configure MSI/MSI-X, and — when
 * the controller cannot signal link-up — start the command handler now.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	bool msix_capable = false;
	bool msi_capable = true;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		linkup_notifier = epc_features->linkup_notifier;
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
		/* Place the test registers in the first BAR the EPC allows. */
		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
		pci_epf_configure_bar(epf, epc_features);
	}

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "Configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	/* No link-up notification available: start polling immediately. */
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
554
/* Device-ID table: matches EPF devices named "pci_epf_test". */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
561
562static int pci_epf_test_probe(struct pci_epf *epf)
 
563{
564	struct pci_epf_test *epf_test;
565	struct device *dev = &epf->dev;
566
567	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
568	if (!epf_test)
569		return -ENOMEM;
570
571	epf->header = &test_header;
572	epf_test->epf = epf;
573
574	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
575
 
 
576	epf_set_drvdata(epf, epf_test);
577	return 0;
578}
579
/* EPF core callbacks: bind/unbind manage resources, linkup starts polling. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.linkup = pci_epf_test_linkup,
};

/* Driver registration data for the "pci_epf_test" endpoint function. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
593
594static int __init pci_epf_test_init(void)
595{
596	int ret;
597
598	kpcitest_workqueue = alloc_workqueue("kpcitest",
599					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
600	if (!kpcitest_workqueue) {
601		pr_err("Failed to allocate the kpcitest work queue\n");
602		return -ENOMEM;
603	}
604
605	ret = pci_epf_register_driver(&test_driver);
606	if (ret) {
 
607		pr_err("Failed to register pci epf test driver --> %d\n", ret);
608		return ret;
609	}
610
611	return 0;
612}
613module_init(pci_epf_test_init);
614
615static void __exit pci_epf_test_exit(void)
616{
 
 
617	pci_epf_unregister_driver(&test_driver);
618}
619module_exit(pci_epf_test_exit);
620
621MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
622MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
623MODULE_LICENSE("GPL v2");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Test driver to test endpoint functionality
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/crc32.h>
  10#include <linux/delay.h>
  11#include <linux/dmaengine.h>
  12#include <linux/io.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/pci_ids.h>
  16#include <linux/random.h>
  17
  18#include <linux/pci-epc.h>
  19#include <linux/pci-epf.h>
  20#include <linux/pci_regs.h>
  21
  22#define IRQ_TYPE_INTX			0
  23#define IRQ_TYPE_MSI			1
  24#define IRQ_TYPE_MSIX			2
  25
  26#define COMMAND_RAISE_INTX_IRQ		BIT(0)
  27#define COMMAND_RAISE_MSI_IRQ		BIT(1)
  28#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
  29#define COMMAND_READ			BIT(3)
  30#define COMMAND_WRITE			BIT(4)
  31#define COMMAND_COPY			BIT(5)
  32
  33#define STATUS_READ_SUCCESS		BIT(0)
  34#define STATUS_READ_FAIL		BIT(1)
  35#define STATUS_WRITE_SUCCESS		BIT(2)
  36#define STATUS_WRITE_FAIL		BIT(3)
  37#define STATUS_COPY_SUCCESS		BIT(4)
  38#define STATUS_COPY_FAIL		BIT(5)
  39#define STATUS_IRQ_RAISED		BIT(6)
  40#define STATUS_SRC_ADDR_INVALID		BIT(7)
  41#define STATUS_DST_ADDR_INVALID		BIT(8)
  42
  43#define FLAG_USE_DMA			BIT(0)
  44
  45#define TIMER_RESOLUTION		1
  46
  47static struct workqueue_struct *kpcitest_workqueue;
  48
/* Per-function driver state for the endpoint test function. */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* kernel virtual address backing each BAR */
	struct pci_epf		*epf;			/* associated endpoint function device */
	enum pci_barno		test_reg_bar;		/* BAR that holds struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* MSI-X table offset; consumers not visible in this chunk */
	struct delayed_work	cmd_handler;		/* polls reg->command */
	struct dma_chan		*dma_chan_tx;		/* MEM_TO_DEV channel (or the shared memcpy channel) */
	struct dma_chan		*dma_chan_rx;		/* DEV_TO_MEM channel (or the shared memcpy channel) */
	struct dma_chan		*transfer_chan;		/* channel used by the in-flight transfer */
	dma_cookie_t		transfer_cookie;	/* cookie of the in-flight transfer */
	enum dma_status		transfer_status;	/* final status set by the DMA callback */
	struct completion	transfer_complete;	/* signalled from pci_epf_test_dma_callback() */
	bool			dma_supported;		/* checked before releasing channels */
	bool			dma_private;		/* using EPC-private slave channels */
	const struct pci_epc_features *epc_features;	/* features of the parent controller */
};
  65
/*
 * Control/status register block exposed to the host through the test BAR.
 * Layout must stay in sync with the host-side test driver — hence __packed.
 */
struct pci_epf_test_reg {
	u32	magic;		/* magic value; not consumed in this chunk — presumably checked by the host */
	u32	command;	/* COMMAND_* bit requested by the host */
	u32	status;		/* STATUS_* bits reported back to the host */
	u64	src_addr;	/* host source address (READ/COPY) */
	u64	dst_addr;	/* host destination address (WRITE/COPY) */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* CRC32 of the payload (host-written for READ, device-written for WRITE) */
	u32	irq_type;	/* IRQ_TYPE_* to raise on completion */
	u32	irq_number;	/* MSI/MSI-X vector number */
	u32	flags;		/* FLAG_USE_DMA */
} __packed;
  78
  79static struct pci_epf_header test_header = {
  80	.vendorid	= PCI_ANY_ID,
  81	.deviceid	= PCI_ANY_ID,
  82	.baseclass_code = PCI_CLASS_OTHERS,
  83	.interrupt_pin	= PCI_INTERRUPT_INTA,
  84};
  85
  86static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
  87
  88static void pci_epf_test_dma_callback(void *param)
  89{
  90	struct pci_epf_test *epf_test = param;
  91	struct dma_tx_state state;
  92
  93	epf_test->transfer_status =
  94		dmaengine_tx_status(epf_test->transfer_chan,
  95				    epf_test->transfer_cookie, &state);
  96	if (epf_test->transfer_status == DMA_COMPLETE ||
  97	    epf_test->transfer_status == DMA_ERROR)
  98		complete(&epf_test->transfer_complete);
  99}
 100
/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/* TX channel moves memory to the device, RX the other way around. */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/*
		 * Private slave channels take the remote RC address through
		 * a slave config; only the local address goes in the
		 * descriptor.
		 */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/* Arm the completion before submitting so the callback can't be missed. */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Runs on success too; stops the channel after errors or interruption. */
	dmaengine_terminate_sync(chan);

	return ret;
}
 191
/* Match criteria handed to epf_dma_filter_fn() via dma_request_channel(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device of the parent EPC */
	u32 dma_mask;		/* required direction: BIT(DMA_DEV_TO_MEM) or BIT(DMA_MEM_TO_DEV) */
};
 196
 197static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
 198{
 199	struct epf_dma_filter *filter = node;
 200	struct dma_slave_caps caps;
 201
 202	memset(&caps, 0, sizeof(caps));
 203	dma_get_slave_caps(chan, &caps);
 204
 205	return chan->device->dev == filter->dev
 206		&& (filter->dma_mask & caps.directions);
 207}
 208
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.  Tries to acquire a pair of
 * direction-specific slave channels from the EPC's own DMA device first;
 * if either is unavailable, falls back to a single generic memcpy channel
 * shared by both directions.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* First choice: slave channels belonging to the EPC's DMA device. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/* TX acquisition failed: release the RX channel taken above. */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	/* Fallback: one generic memcpy channel serving both directions. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
 273
 274/**
 275 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 276 * @epf_test: the EPF test device that performs data transfer operation
 277 *
 278 * Helper to cleanup EPF test DMA channel.
 279 */
 280static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
 281{
 282	if (!epf_test->dma_supported)
 283		return;
 284
 285	dma_release_channel(epf_test->dma_chan_tx);
 286	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
 287		epf_test->dma_chan_tx = NULL;
 288		epf_test->dma_chan_rx = NULL;
 289		return;
 290	}
 291
 292	dma_release_channel(epf_test->dma_chan_rx);
 293	epf_test->dma_chan_rx = NULL;
 294}
 295
 296static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
 297				    const char *op, u64 size,
 298				    struct timespec64 *start,
 299				    struct timespec64 *end, bool dma)
 300{
 301	struct timespec64 ts = timespec64_sub(*end, *start);
 302	u64 rate = 0, ns;
 303
 304	/* calculate the rate */
 305	ns = timespec64_to_ns(&ts);
 306	if (ns)
 307		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
 308
 309	dev_info(&epf_test->epf->dev,
 310		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
 311		 op, size, dma ? "YES" : "NO",
 312		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
 313}
 314
/*
 * COPY test: transfer reg->size bytes from host memory at reg->src_addr to
 * host memory at reg->dst_addr, chunked by the window sizes returned from
 * pci_epc_mem_map().  Uses a MEM_TO_MEM DMA when FLAG_USE_DMA is set,
 * otherwise bounces each chunk through copy_buf with the MMIO accessors.
 * The outcome is reported in reg->status (STATUS_COPY_SUCCESS/FAIL).
 */
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = reg->src_addr;
	u64 dst_addr = reg->dst_addr;
	size_t copy_size = reg->size;
	ssize_t map_size = 0;
	void *copy_buf = NULL, *buf;

	if (reg->flags & FLAG_USE_DMA) {
		/* MEM_TO_MEM transfers need a memcpy-capable channel. */
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		/* Bounce buffer for the PIO path. */
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			reg->status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
					   dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			reg->status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		/* Either window may be smaller than asked; copy the overlap. */
		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (reg->flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		/* Windows released; tell the error path not to unmap again. */
		map_size = 0;
	}

	/*
	 * NOTE(review): start/end only time the last chunk, and are
	 * uninitialized when reg->size == 0 — confirm callers guarantee a
	 * non-zero size.
	 */
	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}
 409
/*
 * READ test: fetch reg->size bytes from host memory at reg->src_addr —
 * chunked by the window sizes pci_epc_mem_map() returns — and verify the
 * data against the CRC32 the host placed in reg->checksum.  Uses the DMA
 * engine when FLAG_USE_DMA is set, memcpy_fromio() otherwise.  The result
 * is reported in reg->status (STATUS_READ_SUCCESS/FAIL).
 */
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = reg->src_addr;
	size_t src_size = reg->size;
	ssize_t map_size = 0;

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		/* Map as much of the remaining range as the EPC allows. */
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			reg->status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (reg->flags & FLAG_USE_DMA) {
			/* DMA writes into the kernel buffer at buf's offset. */
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* Window released; tell the error path not to unmap again. */
		map_size = 0;
	}

	/*
	 * NOTE(review): start/end only time the last chunk, and are
	 * uninitialized when reg->size == 0 — confirm callers guarantee a
	 * non-zero size.
	 */
	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

	/* Verify the whole reassembled buffer against the host's checksum. */
	crc32 = crc32_le(~0, src_buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}
 501
/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish its
 * CRC32 in reg->checksum for the host to verify, then push reg->size bytes
 * to host memory at reg->dst_addr via DMA (FLAG_USE_DMA) or memcpy_toio().
 * Sets STATUS_WRITE_SUCCESS or STATUS_WRITE_FAIL in reg->status.
 */
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = reg->dst_addr;
	size_t dst_size = reg->size;
	ssize_t map_size = 0;

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	/* Random payload; the host re-computes the CRC to validate it. */
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = crc32_le(~0, dst_buf, dst_size);
	buf = dst_buf;

	/*
	 * Map the host address through the outbound window chunk by chunk;
	 * map.pci_size may cover only part of the remaining size.
	 */
	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			/* Note: '=' (not '|=') — reports only this error. */
			reg->status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (reg->flags & FLAG_USE_DMA) {
			/* DMA engine needs a device-visible source. */
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* Mark the window unmapped so the error path skips unmap. */
		map_size = 0;
	}

	/* NOTE: start/end only time the final chunk, not the whole loop. */
	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

	/*
	 * wait 1ms inorder for the write to complete. Without this delay L3
	 * error in observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}
 598
 599static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
 600				   struct pci_epf_test_reg *reg)
 601{
 602	struct pci_epf *epf = epf_test->epf;
 603	struct device *dev = &epf->dev;
 604	struct pci_epc *epc = epf->epc;
 605	u32 status = reg->status | STATUS_IRQ_RAISED;
 606	int count;
 607
 608	/*
 609	 * Set the status before raising the IRQ to ensure that the host sees
 610	 * the updated value when it gets the IRQ.
 611	 */
 612	WRITE_ONCE(reg->status, status);
 613
 614	switch (reg->irq_type) {
 615	case IRQ_TYPE_INTX:
 616		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 617				  PCI_IRQ_INTX, 0);
 618		break;
 619	case IRQ_TYPE_MSI:
 620		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
 621		if (reg->irq_number > count || count <= 0) {
 622			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
 623				reg->irq_number, count);
 624			return;
 625		}
 626		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 627				  PCI_IRQ_MSI, reg->irq_number);
 628		break;
 629	case IRQ_TYPE_MSIX:
 630		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
 631		if (reg->irq_number > count || count <= 0) {
 632			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
 633				reg->irq_number, count);
 634			return;
 635		}
 636		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 637				  PCI_IRQ_MSIX, reg->irq_number);
 638		break;
 639	default:
 640		dev_err(dev, "Failed to raise IRQ, unknown type\n");
 641		break;
 642	}
 643}
 644
/*
 * Polled command dispatcher, run from the kpcitest workqueue.  The host
 * writes a command into the test register BAR; this handler consumes it,
 * runs the matching test operation, and then requeues itself (1 ms period)
 * so new commands keep being picked up.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	/* reg points into BAR memory that the host writes concurrently,
	 * hence the READ_ONCE()/WRITE_ONCE() accesses below. */
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = READ_ONCE(reg->command);
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before running it. */
	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	/* Re-arm the poller; canceled by link_down/unbind/deinit paths. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
 700
 701static int pci_epf_test_set_bar(struct pci_epf *epf)
 702{
 703	int bar, ret;
 704	struct pci_epc *epc = epf->epc;
 705	struct device *dev = &epf->dev;
 706	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 707	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 708
 709	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 710		if (!epf_test->reg[bar])
 711			continue;
 712
 713		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
 714				      &epf->bar[bar]);
 715		if (ret) {
 716			pci_epf_free_space(epf, epf_test->reg[bar], bar,
 717					   PRIMARY_INTERFACE);
 718			dev_err(dev, "Failed to set BAR%d\n", bar);
 719			if (bar == test_reg_bar)
 720				return ret;
 721		}
 722	}
 723
 724	return 0;
 725}
 726
 727static void pci_epf_test_clear_bar(struct pci_epf *epf)
 728{
 729	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 730	struct pci_epc *epc = epf->epc;
 
 731	int bar;
 732
 733	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 734		if (!epf_test->reg[bar])
 735			continue;
 736
 737		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
 738				  &epf->bar[bar]);
 
 
 
 739	}
 740}
 741
/*
 * EPC init callback: bring the function up on the controller — probe DMA
 * availability, write the configuration header, program the BARs, and
 * configure MSI/MSI-X as the controller supports.  If the controller has
 * no link-up notifier, start the command poller immediately.
 */
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	/* DMA is optional: fall back to CPU copies when unavailable. */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	/* Virtual functions > 1 share the physical function's header. */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		/* MSI-X table/PBA live in the test register BAR (see
		 * pci_epf_test_alloc_space()). */
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	/* Without a link-up event, poll for commands right away. */
	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
 796
 797static void pci_epf_test_epc_deinit(struct pci_epf *epf)
 798{
 799	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 800
 801	cancel_delayed_work_sync(&epf_test->cmd_handler);
 802	pci_epf_test_clean_dma_chan(epf_test);
 803	pci_epf_test_clear_bar(epf);
 804}
 805
 806static int pci_epf_test_link_up(struct pci_epf *epf)
 807{
 808	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 809
 810	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
 811			   msecs_to_jiffies(1));
 812
 813	return 0;
 814}
 815
 816static int pci_epf_test_link_down(struct pci_epf *epf)
 817{
 818	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 819
 820	cancel_delayed_work_sync(&epf_test->cmd_handler);
 821
 822	return 0;
 823}
 824
/* Controller lifecycle notifications wired to this function's handlers. */
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};
 831
 832static int pci_epf_test_alloc_space(struct pci_epf *epf)
 833{
 834	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 835	struct device *dev = &epf->dev;
 836	size_t msix_table_size = 0;
 837	size_t test_reg_bar_size;
 838	size_t pba_size = 0;
 839	void *base;
 
 840	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
 841	enum pci_barno bar;
 842	const struct pci_epc_features *epc_features = epf_test->epc_features;
 843	size_t test_reg_size;
 844
 845	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
 846
 847	if (epc_features->msix_capable) {
 848		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
 849		epf_test->msix_table_offset = test_reg_bar_size;
 850		/* Align to QWORD or 8 Bytes */
 851		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
 852	}
 853	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
 854
 855	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
 856				   epc_features, PRIMARY_INTERFACE);
 857	if (!base) {
 858		dev_err(dev, "Failed to allocated register space\n");
 859		return -ENOMEM;
 860	}
 861	epf_test->reg[test_reg_bar] = base;
 862
 863	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
 864		bar = pci_epc_get_next_free_bar(epc_features, bar);
 865		if (bar == NO_BAR)
 866			break;
 867
 868		if (bar == test_reg_bar)
 869			continue;
 870
 
 
 
 871		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
 872					   epc_features, PRIMARY_INTERFACE);
 873		if (!base)
 874			dev_err(dev, "Failed to allocate space for BAR%d\n",
 875				bar);
 876		epf_test->reg[bar] = base;
 877	}
 878
 879	return 0;
 880}
 881
 882static void pci_epf_test_free_space(struct pci_epf *epf)
 
 883{
 884	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 885	int bar;
 886
 887	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 888		if (!epf_test->reg[bar])
 889			continue;
 890
 891		pci_epf_free_space(epf, epf_test->reg[bar], bar,
 892				   PRIMARY_INTERFACE);
 
 
 893	}
 894}
 895
 896static int pci_epf_test_bind(struct pci_epf *epf)
 897{
 898	int ret;
 899	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 
 900	const struct pci_epc_features *epc_features;
 901	enum pci_barno test_reg_bar = BAR_0;
 902	struct pci_epc *epc = epf->epc;
 
 
 
 
 903
 904	if (WARN_ON_ONCE(!epc))
 905		return -EINVAL;
 906
 907	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
 908	if (!epc_features) {
 909		dev_err(&epf->dev, "epc_features not implemented\n");
 910		return -EOPNOTSUPP;
 
 
 
 911	}
 912
 913	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
 914	if (test_reg_bar < 0)
 915		return -EINVAL;
 916
 917	epf_test->test_reg_bar = test_reg_bar;
 918	epf_test->epc_features = epc_features;
 919
 
 
 
 
 
 
 920	ret = pci_epf_test_alloc_space(epf);
 921	if (ret)
 922		return ret;
 923
 924	return 0;
 925}
 
 926
/*
 * Unbind callback: stop the command poller, undo the controller-side setup
 * (DMA channels and BAR mappings) only if the EPC completed init, then
 * release the BAR backing memory.
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	/* Skip hardware teardown if the controller never initialized. */
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}
 939
/* Device-ID table matched against EPF devices created via configfs. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
 946
 947static int pci_epf_test_probe(struct pci_epf *epf,
 948			      const struct pci_epf_device_id *id)
 949{
 950	struct pci_epf_test *epf_test;
 951	struct device *dev = &epf->dev;
 952
 953	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
 954	if (!epf_test)
 955		return -ENOMEM;
 956
 957	epf->header = &test_header;
 958	epf_test->epf = epf;
 959
 960	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
 961
 962	epf->event_ops = &pci_epf_test_event_ops;
 963
 964	epf_set_drvdata(epf, epf_test);
 965	return 0;
 966}
 967
/* EPF core entry points for this function driver. */
static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};
 972
/* Endpoint-function driver registration record. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
 980
 981static int __init pci_epf_test_init(void)
 982{
 983	int ret;
 984
 985	kpcitest_workqueue = alloc_workqueue("kpcitest",
 986					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 987	if (!kpcitest_workqueue) {
 988		pr_err("Failed to allocate the kpcitest work queue\n");
 989		return -ENOMEM;
 990	}
 991
 992	ret = pci_epf_register_driver(&test_driver);
 993	if (ret) {
 994		destroy_workqueue(kpcitest_workqueue);
 995		pr_err("Failed to register pci epf test driver --> %d\n", ret);
 996		return ret;
 997	}
 998
 999	return 0;
1000}
1001module_init(pci_epf_test_init);
1002
1003static void __exit pci_epf_test_exit(void)
1004{
1005	if (kpcitest_workqueue)
1006		destroy_workqueue(kpcitest_workqueue);
1007	pci_epf_unregister_driver(&test_driver);
1008}
1009module_exit(pci_epf_test_exit);
1010
1011MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1012MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1013MODULE_LICENSE("GPL v2");