/*
 * NOTE: web-page chrome ("Linux Audio", training-course ad, "Loading...")
 * removed.  The code below is drivers/pci/endpoint/functions/pci-epf-test.c
 * as shipped in kernel v5.4.
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
  8
  9#include <linux/crc32.h>
 10#include <linux/delay.h>
 
 11#include <linux/io.h>
 12#include <linux/module.h>
 13#include <linux/slab.h>
 14#include <linux/pci_ids.h>
 15#include <linux/random.h>
 16
 17#include <linux/pci-epc.h>
 18#include <linux/pci-epf.h>
 19#include <linux/pci_regs.h>
 20
/* Values the host writes into pci_epf_test_reg.irq_type. */
#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Command bits the host writes into pci_epf_test_reg.command. */
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

/* Status bits reported back to the host in pci_epf_test_reg.status. */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define TIMER_RESOLUTION		1

/* Workqueue running the self-rearming command-poll work for all functions. */
static struct workqueue_struct *kpcitest_workqueue;
 45
/* Per-function driver state, allocated in pci_epf_test_probe(). */
struct pci_epf_test {
	void			*reg[6];	/* kernel VA backing each BAR; NULL if unused */
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;	/* BAR holding struct pci_epf_test_reg */
	struct delayed_work	cmd_handler;	/* polls reg->command every ~1 ms */
	const struct pci_epc_features *epc_features;
};
 53
/*
 * Control registers the host accesses through the test BAR.  __packed
 * fixes the field offsets independent of compiler padding.
 * NOTE(review): presumably mirrored by the host-side pci_endpoint_test
 * driver — confirm before changing the layout.
 */
struct pci_epf_test_reg {
	u32	magic;
	u32	command;	/* COMMAND_* bits, written by the host */
	u32	status;		/* STATUS_* bits, written by the endpoint */
	u64	src_addr;	/* host-side source address */
	u64	dst_addr;	/* host-side destination address */
	u32	size;		/* transfer length in bytes */
	u32	checksum;	/* CRC32 of the transferred data */
	u32	irq_type;	/* IRQ_TYPE_* */
	u32	irq_number;
} __packed;
 65
/* Default config-space header (PCI_ANY_ID == 0xffff placeholders). */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default per-BAR sizes; pci_epf_configure_bar() overwrites an entry when
 * the controller reports a fixed size for that BAR.
 */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
 74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * COMMAND_COPY handler: copy reg->size bytes from host address
 * reg->src_addr to host address reg->dst_addr.
 *
 * Each host address is reached by allocating a window in the endpoint
 * controller's outbound address space (pci_epc_mem_alloc_addr) and
 * mapping that window to the host address (pci_epc_map_addr).  On
 * failure the matching STATUS_*_ADDR_INVALID bit is left in reg->status
 * for the host to read.  Returns 0 on success or a negative errno.
 */
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Outbound window for the source. */
	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	/* Outbound window for the destination. */
	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	memcpy(dst_addr, src_addr, reg->size);

	/* Success falls through the cleanup ladder with ret == 0. */
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}
136
/*
 * COMMAND_READ handler: read reg->size bytes from host memory at
 * reg->src_addr into a local buffer and verify them against the CRC32
 * expected in reg->checksum.  Returns 0 on success, -EIO on checksum
 * mismatch, or a negative errno on setup failure.
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	phys_addr_t phys_addr;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Map an outbound window onto the host source buffer. */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	memcpy_fromio(buf, src_addr, reg->size);

	/* Same seed (~0) the writer side uses in pci_epf_test_write(). */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
189
/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish
 * its CRC32 in reg->checksum for the host to verify, and copy the
 * buffer to host memory at reg->dst_addr.  Returns 0 on success or a
 * negative errno.
 */
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	phys_addr_t phys_addr;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Map an outbound window onto the host destination buffer. */
	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Publish the checksum before the data so the host can verify. */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	memcpy_toio(dst_addr, buf, reg->size);

	/*
	 * Wait 1 ms in order for the write to complete.  Without this
	 * delay an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}
246
247static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
248				   u16 irq)
249{
250	struct pci_epf *epf = epf_test->epf;
251	struct device *dev = &epf->dev;
252	struct pci_epc *epc = epf->epc;
253	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
254	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
255
256	reg->status |= STATUS_IRQ_RAISED;
257
258	switch (irq_type) {
259	case IRQ_TYPE_LEGACY:
260		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
261		break;
262	case IRQ_TYPE_MSI:
263		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
264		break;
265	case IRQ_TYPE_MSIX:
266		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
267		break;
268	default:
269		dev_err(dev, "Failed to raise IRQ, unknown type\n");
270		break;
271	}
272}
273
/*
 * Delayed work that polls the command register the host writes through
 * the test BAR, dispatches the requested operation, and re-queues
 * itself with a 1 ms delay.  At most one command bit is handled per
 * invocation, in the fixed priority order below.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before handling. */
	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		/* Reject vector numbers beyond what is configured. */
		count = pci_epc_get_msi(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
				  reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		count = pci_epc_get_msix(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				  reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	/* Re-arm the poller. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
362
363static void pci_epf_test_linkup(struct pci_epf *epf)
364{
365	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
366
367	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
368			   msecs_to_jiffies(1));
369}
370
/*
 * Tear down what bind set up: stop the command poller and the
 * controller, then clear and free every BAR whose backing space was
 * allocated (non-NULL reg[bar]).
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar;
	int bar;

	/* NOTE(review): cancel_delayed_work() does not wait for a running
	 * handler to finish; confirm the poll work cannot still be
	 * executing while the BARs below are freed.
	 */
	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epc_stop(epc);
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		epf_bar = &epf->bar[bar];

		if (epf_test->reg[bar]) {
			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
		}
	}
}
389
390static int pci_epf_test_set_bar(struct pci_epf *epf)
391{
392	int bar, add;
393	int ret;
394	struct pci_epf_bar *epf_bar;
395	struct pci_epc *epc = epf->epc;
396	struct device *dev = &epf->dev;
397	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
398	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
399	const struct pci_epc_features *epc_features;
400
401	epc_features = epf_test->epc_features;
402
403	for (bar = BAR_0; bar <= BAR_5; bar += add) {
404		epf_bar = &epf->bar[bar];
405		/*
406		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
407		 * if the specific implementation required a 64-bit BAR,
408		 * even if we only requested a 32-bit BAR.
409		 */
410		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
411
412		if (!!(epc_features->reserved_bar & (1 << bar)))
413			continue;
414
415		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
416		if (ret) {
417			pci_epf_free_space(epf, epf_test->reg[bar], bar);
 
418			dev_err(dev, "Failed to set BAR%d\n", bar);
419			if (bar == test_reg_bar)
420				return ret;
421		}
422	}
423
424	return 0;
425}
426
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Allocate backing memory for the BARs: the register BAR gets either
 * the controller-fixed size (mirrored into bar_size[] by
 * pci_epf_configure_bar()) or just sizeof(struct pci_epf_test_reg);
 * every other non-reserved BAR gets its bar_size[] entry.  Only a
 * failure on the register BAR is fatal.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	if (epc_features->bar_fixed_size[test_reg_bar])
		test_reg_size = bar_size[test_reg_bar];
	else
		test_reg_size = sizeof(struct pci_epf_test_reg);

	base = pci_epf_alloc_space(epf, test_reg_size,
				   test_reg_bar, epc_features->align);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar <= BAR_5; bar += add) {
		epf_bar = &epf->bar[bar];
		/* A 64-bit BAR consumes the following BAR slot as well. */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		/* May be NULL on failure; checked again in unbind/set_bar. */
		epf_test->reg[bar] = base;
	}

	return 0;
}
473
474static void pci_epf_configure_bar(struct pci_epf *epf,
475				  const struct pci_epc_features *epc_features)
476{
477	struct pci_epf_bar *epf_bar;
478	bool bar_fixed_64bit;
479	int i;
480
481	for (i = BAR_0; i <= BAR_5; i++) {
482		epf_bar = &epf->bar[i];
483		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
484		if (bar_fixed_64bit)
485			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
486		if (epc_features->bar_fixed_size[i])
487			bar_size[i] = epc_features->bar_fixed_size[i];
488	}
489}
490
/*
 * Bind callback: configure the endpoint function on its controller.
 *
 * Sequence: query controller features, pick the first free BAR for the
 * test registers, write the config-space header, allocate and program
 * the BARs, configure MSI/MSI-X as supported, and — when the controller
 * cannot notify link-up — start the command poller immediately.
 * Returns 0 on success or a negative errno.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	bool msix_capable = false;
	bool msi_capable = true;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	/* Defaults above apply when the controller reports no features. */
	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		linkup_notifier = epc_features->linkup_notifier;
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
		pci_epf_configure_bar(epf, epc_features);
	}

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "Configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	/* Without a link-up notifier, poll right away instead of waiting
	 * for pci_epf_test_linkup().
	 */
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
554
/* Matches the "pci_epf_test" function name used when creating the EPF. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},	/* sentinel */
};
561
562static int pci_epf_test_probe(struct pci_epf *epf)
563{
564	struct pci_epf_test *epf_test;
565	struct device *dev = &epf->dev;
566
567	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
568	if (!epf_test)
569		return -ENOMEM;
570
571	epf->header = &test_header;
572	epf_test->epf = epf;
573
574	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
575
576	epf_set_drvdata(epf, epf_test);
577	return 0;
578}
579
/* EPF callbacks: bind/unbind against a controller, linkup notification. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.linkup = pci_epf_test_linkup,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
593
594static int __init pci_epf_test_init(void)
595{
596	int ret;
597
598	kpcitest_workqueue = alloc_workqueue("kpcitest",
599					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
600	if (!kpcitest_workqueue) {
601		pr_err("Failed to allocate the kpcitest work queue\n");
602		return -ENOMEM;
603	}
604
605	ret = pci_epf_register_driver(&test_driver);
606	if (ret) {
 
607		pr_err("Failed to register pci epf test driver --> %d\n", ret);
608		return ret;
609	}
610
611	return 0;
612}
613module_init(pci_epf_test_init);
614
615static void __exit pci_epf_test_exit(void)
616{
 
 
617	pci_epf_unregister_driver(&test_driver);
618}
619module_exit(pci_epf_test_exit);
620
621MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
622MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
623MODULE_LICENSE("GPL v2");
/* ===== Second scraped copy: the same driver as shipped in kernel v5.14.15
 * (adds DMA support); truncated at the end of this capture. ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Test driver to test endpoint functionality
  4 *
  5 * Copyright (C) 2017 Texas Instruments
  6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7 */
  8
  9#include <linux/crc32.h>
 10#include <linux/delay.h>
 11#include <linux/dmaengine.h>
 12#include <linux/io.h>
 13#include <linux/module.h>
 14#include <linux/slab.h>
 15#include <linux/pci_ids.h>
 16#include <linux/random.h>
 17
 18#include <linux/pci-epc.h>
 19#include <linux/pci-epf.h>
 20#include <linux/pci_regs.h>
 21
/* Values the host writes into pci_epf_test_reg.irq_type. */
#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Command bits the host writes into pci_epf_test_reg.command. */
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

/* Status bits reported back to the host in pci_epf_test_reg.status. */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

/* pci_epf_test_reg.flags: host requests DMA instead of CPU memcpy. */
#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

/* Workqueue running the self-rearming command-poll work for all functions. */
static struct workqueue_struct *kpcitest_workqueue;
 48
/* Per-function driver state. */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* kernel VA backing each BAR; NULL if unused */
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;		/* BAR holding struct pci_epf_test_reg */
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;		/* polls reg->command */
	struct dma_chan		*dma_chan;		/* memcpy channel; valid iff dma_supported */
	struct completion	transfer_complete;	/* signalled by the DMA callback */
	bool			dma_supported;
	const struct pci_epc_features *epc_features;
};

/*
 * Control registers the host accesses through the test BAR.  __packed
 * fixes the field offsets independent of compiler padding.
 * NOTE(review): presumably mirrored by the host-side pci_endpoint_test
 * driver — confirm before changing the layout.
 */
struct pci_epf_test_reg {
	u32	magic;
	u32	command;	/* COMMAND_* bits, written by the host */
	u32	status;		/* STATUS_* bits, written by the endpoint */
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;	/* IRQ_TYPE_* */
	u32	irq_number;
	u32	flags;		/* FLAG_USE_DMA */
} __packed;
 73
/* Default config-space header (PCI_ANY_ID == 0xffff placeholders). */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default per-BAR sizes; entries may be overridden by controller-fixed
 * BAR sizes (epc_features->bar_fixed_size).
 */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
 82
/* DMA completion callback: wake the waiter in pci_epf_test_data_transfer(). */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}
 89
 90/**
 91 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 92 *				  data between PCIe EP and remote PCIe RC
 93 * @epf_test: the EPF test device that performs the data transfer operation
 94 * @dma_dst: The destination address of the data transfer. It can be a physical
 95 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 96 * @dma_src: The source address of the data transfer. It can be a physical
 97 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 98 * @len: The size of the data transfer
 99 *
100 * Function that uses dmaengine API to transfer data between PCIe EP and remote
101 * PCIe RC. The source and destination address can be a physical address given
102 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
103 *
104 * The function returns '0' on success and negative value on failure.
105 */
106static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
107				      dma_addr_t dma_dst, dma_addr_t dma_src,
108				      size_t len)
109{
110	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
111	struct dma_chan *chan = epf_test->dma_chan;
112	struct pci_epf *epf = epf_test->epf;
113	struct dma_async_tx_descriptor *tx;
114	struct device *dev = &epf->dev;
115	dma_cookie_t cookie;
116	int ret;
117
118	if (IS_ERR_OR_NULL(chan)) {
119		dev_err(dev, "Invalid DMA memcpy channel\n");
120		return -EINVAL;
121	}
122
123	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
124	if (!tx) {
125		dev_err(dev, "Failed to prepare DMA memcpy\n");
126		return -EIO;
127	}
128
129	tx->callback = pci_epf_test_dma_callback;
130	tx->callback_param = epf_test;
131	cookie = tx->tx_submit(tx);
132	reinit_completion(&epf_test->transfer_complete);
133
134	ret = dma_submit_error(cookie);
135	if (ret) {
136		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
137		return -EIO;
138	}
139
140	dma_async_issue_pending(chan);
141	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
142	if (ret < 0) {
143		dmaengine_terminate_sync(chan);
144		dev_err(dev, "DMA wait_for_completion_timeout\n");
145		return -ETIMEDOUT;
146	}
147
148	return 0;
149}
150
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel: request any memcpy-capable
 * channel from the dmaengine core and set up the completion used to wait
 * for transfers.
 *
 * Return: '0' on success and negative value on failure.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		/* -EPROBE_DEFER is routine during boot; stay quiet then. */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan = dma_chan;

	return 0;
}
181
/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel: releases the memcpy channel
 * acquired by pci_epf_test_init_dma_chan().  No-op when DMA was never
 * successfully set up.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan);
	epf_test->dma_chan = NULL;
}
196
197static void pci_epf_test_print_rate(const char *ops, u64 size,
198				    struct timespec64 *start,
199				    struct timespec64 *end, bool dma)
200{
201	struct timespec64 ts;
202	u64 rate, ns;
203
204	ts = timespec64_sub(*end, *start);
205
206	/* convert both size (stored in 'rate') and time in terms of 'ns' */
207	ns = timespec64_to_ns(&ts);
208	rate = size * NSEC_PER_SEC;
209
210	/* Divide both size (stored in 'rate') and ns by a common factor */
211	while (ns > UINT_MAX) {
212		rate >>= 1;
213		ns >>= 1;
214	}
215
216	if (!ns)
217		return;
218
219	/* calculate the rate */
220	do_div(rate, (uint32_t)ns);
221
222	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
223		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
224		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
225}
226
/*
 * COMMAND_COPY handler: copy reg->size bytes from host address
 * reg->src_addr to host address reg->dst_addr, via DMA when the host
 * set FLAG_USE_DMA (and the channel is available) or a CPU memcpy
 * otherwise.  The elapsed time is reported through
 * pci_epf_test_print_rate().  Returns 0 on success or a negative errno.
 */
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Outbound window for the source. */
	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	/* Outbound window for the destination. */
	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		/* Both windows are physical addresses the engine can use. */
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		memcpy(dst_addr, src_addr, reg->size);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}
308
/*
 * COMMAND_READ handler: read reg->size bytes from host memory at
 * reg->src_addr into a local buffer (via DMA when FLAG_USE_DMA is set,
 * CPU memcpy_fromio otherwise) and verify them against the CRC32
 * expected in reg->checksum.  Returns 0 on success, -EIO on checksum
 * mismatch, or a negative errno on setup failure.
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;	/* device doing the DMA mapping */
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Map an outbound window onto the host source buffer. */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* DMA-map the local buffer as the transfer destination. */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	/* Same seed (~0) the writer side uses in pci_epf_test_write(). */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
397
/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish
 * its CRC32 in reg->checksum, and copy the buffer to host memory at
 * reg->dst_addr (via DMA when FLAG_USE_DMA is set, CPU memcpy_toio
 * otherwise).  Returns 0 on success or a negative errno.
 */
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;	/* device doing the DMA mapping */
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Map an outbound window onto the host destination buffer. */
	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Publish the checksum before the data so the host can verify. */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		/* DMA-map the local buffer as the transfer source. */
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * wait 1ms inorder for the write to complete. Without this delay L3
	 * error in observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}
490
/*
 * Signal completion of a command to the host: set STATUS_IRQ_RAISED in
 * the shared status register, then fire the interrupt type/vector the
 * host requested.
 */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
				   u16 irq)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Publish the status bit before the interrupt reaches the host. */
	reg->status |= STATUS_IRQ_RAISED;

	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		break;
	case IRQ_TYPE_MSI:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
		break;
	case IRQ_TYPE_MSIX:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}
517
/*
 * pci_epf_test_cmd_handler - poll for and execute one host-issued command
 *
 * Self-requeueing work item running on kpcitest_workqueue roughly every
 * millisecond.  The host writes a COMMAND_* bit into the command register
 * of the test register BAR; this handler consumes it, performs the
 * requested operation and publishes the outcome via the status register,
 * raising an IRQ where the protocol expects one.  Register access order
 * matters here: command/status are shared with the host.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Nothing pending from the host; just poll again later. */
	command = reg->command;
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before acting. */
	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	/* Endpoint writes a generated buffer to the host (reg->dst_addr). */
	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	/* Endpoint reads host memory (presumably reg->src_addr; see
	 * pci_epf_test_read) and reports success/failure. */
	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	/* Endpoint copies between two host addresses (see pci_epf_test_copy). */
	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	/* Raise a specific MSI vector; silently ignore out-of-range requests. */
	if (command & COMMAND_RAISE_MSI_IRQ) {
		count = pci_epc_get_msi(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
				  reg->irq_number);
		goto reset_handler;
	}

	/* Raise a specific MSI-X vector; silently ignore out-of-range requests. */
	if (command & COMMAND_RAISE_MSIX_IRQ) {
		count = pci_epc_get_msix(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				  reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	/* Re-arm the poll: look for the next command in ~1ms. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
606
 
 
 
 
 
 
 
 
607static void pci_epf_test_unbind(struct pci_epf *epf)
608{
609	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
610	struct pci_epc *epc = epf->epc;
611	struct pci_epf_bar *epf_bar;
612	int bar;
613
614	cancel_delayed_work(&epf_test->cmd_handler);
615	pci_epf_test_clean_dma_chan(epf_test);
616	pci_epc_stop(epc);
617	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
618		epf_bar = &epf->bar[bar];
619
620		if (epf_test->reg[bar]) {
621			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
622			pci_epf_free_space(epf, epf_test->reg[bar], bar,
623					   PRIMARY_INTERFACE);
624		}
625	}
626}
627
628static int pci_epf_test_set_bar(struct pci_epf *epf)
629{
630	int bar, add;
631	int ret;
632	struct pci_epf_bar *epf_bar;
633	struct pci_epc *epc = epf->epc;
634	struct device *dev = &epf->dev;
635	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
636	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
637	const struct pci_epc_features *epc_features;
638
639	epc_features = epf_test->epc_features;
640
641	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
642		epf_bar = &epf->bar[bar];
643		/*
644		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
645		 * if the specific implementation required a 64-bit BAR,
646		 * even if we only requested a 32-bit BAR.
647		 */
648		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
649
650		if (!!(epc_features->reserved_bar & (1 << bar)))
651			continue;
652
653		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
654		if (ret) {
655			pci_epf_free_space(epf, epf_test->reg[bar], bar,
656					   PRIMARY_INTERFACE);
657			dev_err(dev, "Failed to set BAR%d\n", bar);
658			if (bar == test_reg_bar)
659				return ret;
660		}
661	}
662
663	return 0;
664}
665
666static int pci_epf_test_core_init(struct pci_epf *epf)
667{
668	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
669	struct pci_epf_header *header = epf->header;
670	const struct pci_epc_features *epc_features;
671	struct pci_epc *epc = epf->epc;
672	struct device *dev = &epf->dev;
673	bool msix_capable = false;
674	bool msi_capable = true;
675	int ret;
676
677	epc_features = pci_epc_get_features(epc, epf->func_no);
678	if (epc_features) {
679		msix_capable = epc_features->msix_capable;
680		msi_capable = epc_features->msi_capable;
681	}
682
683	ret = pci_epc_write_header(epc, epf->func_no, header);
684	if (ret) {
685		dev_err(dev, "Configuration header write failed\n");
686		return ret;
687	}
688
689	ret = pci_epf_test_set_bar(epf);
690	if (ret)
691		return ret;
692
693	if (msi_capable) {
694		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
695		if (ret) {
696			dev_err(dev, "MSI configuration failed\n");
697			return ret;
698		}
699	}
700
701	if (msix_capable) {
702		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
703				       epf_test->test_reg_bar,
704				       epf_test->msix_table_offset);
705		if (ret) {
706			dev_err(dev, "MSI-X configuration failed\n");
707			return ret;
708		}
709	}
710
711	return 0;
712}
713
714static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
715				 void *data)
716{
717	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
718	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
719	int ret;
720
721	switch (val) {
722	case CORE_INIT:
723		ret = pci_epf_test_core_init(epf);
724		if (ret)
725			return NOTIFY_BAD;
726		break;
727
728	case LINK_UP:
729		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
730				   msecs_to_jiffies(1));
731		break;
732
733	default:
734		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
735		return NOTIFY_BAD;
736	}
737
738	return NOTIFY_OK;
739}
740
741static int pci_epf_test_alloc_space(struct pci_epf *epf)
742{
743	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
744	struct device *dev = &epf->dev;
745	struct pci_epf_bar *epf_bar;
746	size_t msix_table_size = 0;
747	size_t test_reg_bar_size;
748	size_t pba_size = 0;
749	bool msix_capable;
750	void *base;
751	int bar, add;
752	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
753	const struct pci_epc_features *epc_features;
754	size_t test_reg_size;
755
756	epc_features = epf_test->epc_features;
757
758	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
759
760	msix_capable = epc_features->msix_capable;
761	if (msix_capable) {
762		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
763		epf_test->msix_table_offset = test_reg_bar_size;
764		/* Align to QWORD or 8 Bytes */
765		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
766	}
767	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
768
769	if (epc_features->bar_fixed_size[test_reg_bar]) {
770		if (test_reg_size > bar_size[test_reg_bar])
771			return -ENOMEM;
772		test_reg_size = bar_size[test_reg_bar];
773	}
 
774
775	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
776				   epc_features->align, PRIMARY_INTERFACE);
777	if (!base) {
778		dev_err(dev, "Failed to allocated register space\n");
779		return -ENOMEM;
780	}
781	epf_test->reg[test_reg_bar] = base;
782
783	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
784		epf_bar = &epf->bar[bar];
785		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
786
787		if (bar == test_reg_bar)
788			continue;
789
790		if (!!(epc_features->reserved_bar & (1 << bar)))
791			continue;
792
793		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
794					   epc_features->align,
795					   PRIMARY_INTERFACE);
796		if (!base)
797			dev_err(dev, "Failed to allocate space for BAR%d\n",
798				bar);
799		epf_test->reg[bar] = base;
800	}
801
802	return 0;
803}
804
805static void pci_epf_configure_bar(struct pci_epf *epf,
806				  const struct pci_epc_features *epc_features)
807{
808	struct pci_epf_bar *epf_bar;
809	bool bar_fixed_64bit;
810	int i;
811
812	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
813		epf_bar = &epf->bar[i];
814		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
815		if (bar_fixed_64bit)
816			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
817		if (epc_features->bar_fixed_size[i])
818			bar_size[i] = epc_features->bar_fixed_size[i];
819	}
820}
821
822static int pci_epf_test_bind(struct pci_epf *epf)
823{
824	int ret;
825	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
 
826	const struct pci_epc_features *epc_features;
827	enum pci_barno test_reg_bar = BAR_0;
828	struct pci_epc *epc = epf->epc;
 
829	bool linkup_notifier = false;
830	bool core_init_notifier = false;
 
831
832	if (WARN_ON_ONCE(!epc))
833		return -EINVAL;
834
835	epc_features = pci_epc_get_features(epc, epf->func_no);
836	if (!epc_features) {
837		dev_err(&epf->dev, "epc_features not implemented\n");
838		return -EOPNOTSUPP;
 
 
 
839	}
840
841	linkup_notifier = epc_features->linkup_notifier;
842	core_init_notifier = epc_features->core_init_notifier;
843	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
844	if (test_reg_bar < 0)
845		return -EINVAL;
846	pci_epf_configure_bar(epf, epc_features);
847
848	epf_test->test_reg_bar = test_reg_bar;
849	epf_test->epc_features = epc_features;
850
 
 
 
 
 
 
851	ret = pci_epf_test_alloc_space(epf);
852	if (ret)
853		return ret;
854
855	if (!core_init_notifier) {
856		ret = pci_epf_test_core_init(epf);
857		if (ret)
 
 
 
 
 
858			return ret;
 
859	}
860
861	epf_test->dma_supported = true;
 
 
 
 
 
 
862
863	ret = pci_epf_test_init_dma_chan(epf_test);
864	if (ret)
865		epf_test->dma_supported = false;
866
867	if (linkup_notifier) {
868		epf->nb.notifier_call = pci_epf_test_notifier;
869		pci_epc_register_notifier(epc, &epf->nb);
870	} else {
871		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
872	}
873
874	return 0;
875}
876
/* Matches endpoint function devices created with the name "pci_epf_test". */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
883
884static int pci_epf_test_probe(struct pci_epf *epf)
885{
886	struct pci_epf_test *epf_test;
887	struct device *dev = &epf->dev;
888
889	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
890	if (!epf_test)
891		return -ENOMEM;
892
893	epf->header = &test_header;
894	epf_test->epf = epf;
895
896	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
897
898	epf_set_drvdata(epf, epf_test);
899	return 0;
900}
901
/*
 * EPF framework callbacks, invoked when the function is attached to or
 * detached from an endpoint controller.
 */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};
906
/* Endpoint function driver registered with the PCI endpoint core. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
914
915static int __init pci_epf_test_init(void)
916{
917	int ret;
918
919	kpcitest_workqueue = alloc_workqueue("kpcitest",
920					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
921	if (!kpcitest_workqueue) {
922		pr_err("Failed to allocate the kpcitest work queue\n");
923		return -ENOMEM;
924	}
925
926	ret = pci_epf_register_driver(&test_driver);
927	if (ret) {
928		destroy_workqueue(kpcitest_workqueue);
929		pr_err("Failed to register pci epf test driver --> %d\n", ret);
930		return ret;
931	}
932
933	return 0;
934}
935module_init(pci_epf_test_init);
936
937static void __exit pci_epf_test_exit(void)
938{
939	if (kpcitest_workqueue)
940		destroy_workqueue(kpcitest_workqueue);
941	pci_epf_unregister_driver(&test_driver);
942}
943module_exit(pci_epf_test_exit);
944
945MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
946MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
947MODULE_LICENSE("GPL v2");