v4.17
 
  1/**
  2 * Host side test driver to test endpoint functionality
  3 *
  4 * Copyright (C) 2017 Texas Instruments
  5 * Author: Kishon Vijay Abraham I <kishon@ti.com>
  6 *
  7 * This program is free software: you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 of
  9 * the License as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 18 */
 19
 20#include <linux/crc32.h>
 21#include <linux/delay.h>
 22#include <linux/fs.h>
 23#include <linux/io.h>
 24#include <linux/interrupt.h>
 25#include <linux/irq.h>
 26#include <linux/miscdevice.h>
 27#include <linux/module.h>
 28#include <linux/mutex.h>
 29#include <linux/random.h>
 30#include <linux/slab.h>
 31#include <linux/pci.h>
 32#include <linux/pci_ids.h>
 33
 34#include <linux/pci_regs.h>
 35
 36#include <uapi/linux/pcitest.h>
 37
 38#define DRV_MODULE_NAME			"pci-endpoint-test"
 39
 40#define PCI_ENDPOINT_TEST_MAGIC		0x0
 41
 42#define PCI_ENDPOINT_TEST_COMMAND	0x4
 43#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
 44#define COMMAND_RAISE_MSI_IRQ		BIT(1)
 45#define MSI_NUMBER_SHIFT		2
 46/* 6 bits for MSI number */
 47#define COMMAND_READ                    BIT(8)
 48#define COMMAND_WRITE                   BIT(9)
 49#define COMMAND_COPY                    BIT(10)
 50
 51#define PCI_ENDPOINT_TEST_STATUS	0x8
 52#define STATUS_READ_SUCCESS             BIT(0)
 53#define STATUS_READ_FAIL                BIT(1)
 54#define STATUS_WRITE_SUCCESS            BIT(2)
 55#define STATUS_WRITE_FAIL               BIT(3)
 56#define STATUS_COPY_SUCCESS             BIT(4)
 57#define STATUS_COPY_FAIL                BIT(5)
 58#define STATUS_IRQ_RAISED               BIT(6)
 59#define STATUS_SRC_ADDR_INVALID         BIT(7)
 60#define STATUS_DST_ADDR_INVALID         BIT(8)
 61
 62#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0xc
 63#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
 64
 65#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
 66#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
 67
 68#define PCI_ENDPOINT_TEST_SIZE		0x1c
 69#define PCI_ENDPOINT_TEST_CHECKSUM	0x20
 70
 71static DEFINE_IDA(pci_endpoint_test_ida);
 72
 73#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
 74					    miscdev)
 75
 76static bool no_msi;
 77module_param(no_msi, bool, 0444);
 78MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
 79
 80enum pci_barno {
 81	BAR_0,
 82	BAR_1,
 83	BAR_2,
 84	BAR_3,
 85	BAR_4,
 86	BAR_5,
 87};
 88
 89struct pci_endpoint_test {
 90	struct pci_dev	*pdev;
 91	void __iomem	*base;
 92	void __iomem	*bar[6];
 93	struct completion irq_raised;
 94	int		last_irq;
 95	int		num_irqs;
 96	/* mutex to protect the ioctls */
 97	struct mutex	mutex;
 98	struct miscdevice miscdev;
 99	enum pci_barno test_reg_bar;
100	size_t alignment;
101};
102
103struct pci_endpoint_test_data {
104	enum pci_barno test_reg_bar;
105	size_t alignment;
106	bool no_msi;
107};
108
109static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
110					  u32 offset)
111{
112	return readl(test->base + offset);
113}
114
115static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
116					    u32 offset, u32 value)
117{
118	writel(value, test->base + offset);
119}
120
121static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
122					      int bar, int offset)
123{
124	return readl(test->bar[bar] + offset);
125}
126
127static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
128						int bar, u32 offset, u32 value)
129{
130	writel(value, test->bar[bar] + offset);
131}
132
133static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
134{
135	struct pci_endpoint_test *test = dev_id;
136	u32 reg;
137
138	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
139	if (reg & STATUS_IRQ_RAISED) {
140		test->last_irq = irq;
141		complete(&test->irq_raised);
142		reg &= ~STATUS_IRQ_RAISED;
143	}
144	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
145				 reg);
146
147	return IRQ_HANDLED;
148}
149
150static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
151				  enum pci_barno barno)
152{
153	int j;
154	u32 val;
155	int size;
156	struct pci_dev *pdev = test->pdev;
157
158	if (!test->bar[barno])
159		return false;
160
161	size = pci_resource_len(pdev, barno);
162
163	if (barno == test->test_reg_bar)
164		size = 0x4;
165
166	for (j = 0; j < size; j += 4)
167		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
168
169	for (j = 0; j < size; j += 4) {
170		val = pci_endpoint_test_bar_readl(test, barno, j);
171		if (val != 0xA0A0A0A0)
172			return false;
173	}
174
175	return true;
176}
177
178static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
179{
180	u32 val;
181
182	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
183				 COMMAND_RAISE_LEGACY_IRQ);
184	val = wait_for_completion_timeout(&test->irq_raised,
185					  msecs_to_jiffies(1000));
186	if (!val)
187		return false;
188
189	return true;
190}
191
192static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
193				      u8 msi_num)
194{
195	u32 val;
196	struct pci_dev *pdev = test->pdev;
197
198	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
199				 msi_num << MSI_NUMBER_SHIFT |
200				 COMMAND_RAISE_MSI_IRQ);
201	val = wait_for_completion_timeout(&test->irq_raised,
202					  msecs_to_jiffies(1000));
203	if (!val)
204		return false;
205
206	if (test->last_irq - pdev->irq == msi_num - 1)
207		return true;
208
209	return false;
210}
211
212static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
213{
214	bool ret = false;
215	void *src_addr;
216	void *dst_addr;
217	dma_addr_t src_phys_addr;
218	dma_addr_t dst_phys_addr;
219	struct pci_dev *pdev = test->pdev;
220	struct device *dev = &pdev->dev;
221	void *orig_src_addr;
222	dma_addr_t orig_src_phys_addr;
223	void *orig_dst_addr;
224	dma_addr_t orig_dst_phys_addr;
225	size_t offset;
226	size_t alignment = test->alignment;
227	u32 src_crc32;
228	u32 dst_crc32;
229
230	if (size > SIZE_MAX - alignment)
231		goto err;
232
233	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
234					   &orig_src_phys_addr, GFP_KERNEL);
235	if (!orig_src_addr) {
236		dev_err(dev, "failed to allocate source buffer\n");
237		ret = false;
238		goto err;
239	}
240
241	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
242		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
243		offset = src_phys_addr - orig_src_phys_addr;
244		src_addr = orig_src_addr + offset;
245	} else {
246		src_phys_addr = orig_src_phys_addr;
247		src_addr = orig_src_addr;
248	}
249
250	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
251				 lower_32_bits(src_phys_addr));
252
253	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
254				 upper_32_bits(src_phys_addr));
255
256	get_random_bytes(src_addr, size);
257	src_crc32 = crc32_le(~0, src_addr, size);
258
259	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
260					   &orig_dst_phys_addr, GFP_KERNEL);
261	if (!orig_dst_addr) {
262		dev_err(dev, "failed to allocate destination address\n");
263		ret = false;
264		goto err_orig_src_addr;
265	}
266
267	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
268		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
269		offset = dst_phys_addr - orig_dst_phys_addr;
270		dst_addr = orig_dst_addr + offset;
271	} else {
272		dst_phys_addr = orig_dst_phys_addr;
273		dst_addr = orig_dst_addr;
274	}
275
276	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
277				 lower_32_bits(dst_phys_addr));
278	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
279				 upper_32_bits(dst_phys_addr));
280
281	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
282				 size);
283
284	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
285				 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
286
287	wait_for_completion(&test->irq_raised);
288
289	dst_crc32 = crc32_le(~0, dst_addr, size);
290	if (dst_crc32 == src_crc32)
291		ret = true;
292
293	dma_free_coherent(dev, size + alignment, orig_dst_addr,
294			  orig_dst_phys_addr);
295
296err_orig_src_addr:
297	dma_free_coherent(dev, size + alignment, orig_src_addr,
298			  orig_src_phys_addr);
299
300err:
301	return ret;
302}
303
304static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
305{
306	bool ret = false;
307	u32 reg;
308	void *addr;
309	dma_addr_t phys_addr;
310	struct pci_dev *pdev = test->pdev;
311	struct device *dev = &pdev->dev;
312	void *orig_addr;
313	dma_addr_t orig_phys_addr;
314	size_t offset;
315	size_t alignment = test->alignment;
316	u32 crc32;
317
318	if (size > SIZE_MAX - alignment)
319		goto err;
320
321	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
322				       GFP_KERNEL);
323	if (!orig_addr) {
324		dev_err(dev, "failed to allocate address\n");
325		ret = false;
326		goto err;
327	}
328
329	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
330		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
331		offset = phys_addr - orig_phys_addr;
332		addr = orig_addr + offset;
333	} else {
334		phys_addr = orig_phys_addr;
335		addr = orig_addr;
336	}
337
338	get_random_bytes(addr, size);
339
340	crc32 = crc32_le(~0, addr, size);
341	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
342				 crc32);
343
344	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
345				 lower_32_bits(phys_addr));
346	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
347				 upper_32_bits(phys_addr));
348
349	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
350
351	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
352				 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
353
354	wait_for_completion(&test->irq_raised);
355
356	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
357	if (reg & STATUS_READ_SUCCESS)
358		ret = true;
359
360	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
361
362err:
363	return ret;
364}
365
366static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
367{
368	bool ret = false;
369	void *addr;
370	dma_addr_t phys_addr;
371	struct pci_dev *pdev = test->pdev;
372	struct device *dev = &pdev->dev;
373	void *orig_addr;
374	dma_addr_t orig_phys_addr;
375	size_t offset;
376	size_t alignment = test->alignment;
377	u32 crc32;
378
379	if (size > SIZE_MAX - alignment)
380		goto err;
381
382	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
383				       GFP_KERNEL);
384	if (!orig_addr) {
385		dev_err(dev, "failed to allocate destination address\n");
386		ret = false;
387		goto err;
388	}
389
390	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
391		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
392		offset = phys_addr - orig_phys_addr;
393		addr = orig_addr + offset;
394	} else {
395		phys_addr = orig_phys_addr;
396		addr = orig_addr;
397	}
398
399	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
400				 lower_32_bits(phys_addr));
401	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
402				 upper_32_bits(phys_addr));
403
404	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
405
406	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
407				 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
408
409	wait_for_completion(&test->irq_raised);
410
411	crc32 = crc32_le(~0, addr, size);
412	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
413		ret = true;
414
415	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
416err:
417	return ret;
418}
419
420static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
421				    unsigned long arg)
422{
423	int ret = -EINVAL;
424	enum pci_barno bar;
425	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
426
427	mutex_lock(&test->mutex);
428	switch (cmd) {
429	case PCITEST_BAR:
430		bar = arg;
431		if (bar < 0 || bar > 5)
432			goto ret;
433		ret = pci_endpoint_test_bar(test, bar);
434		break;
435	case PCITEST_LEGACY_IRQ:
436		ret = pci_endpoint_test_legacy_irq(test);
437		break;
438	case PCITEST_MSI:
439		ret = pci_endpoint_test_msi_irq(test, arg);
440		break;
441	case PCITEST_WRITE:
442		ret = pci_endpoint_test_write(test, arg);
443		break;
444	case PCITEST_READ:
445		ret = pci_endpoint_test_read(test, arg);
446		break;
447	case PCITEST_COPY:
448		ret = pci_endpoint_test_copy(test, arg);
449		break;
450	}
451
452ret:
453	mutex_unlock(&test->mutex);
454	return ret;
455}
456
457static const struct file_operations pci_endpoint_test_fops = {
458	.owner = THIS_MODULE,
459	.unlocked_ioctl = pci_endpoint_test_ioctl,
460};
461
462static int pci_endpoint_test_probe(struct pci_dev *pdev,
463				   const struct pci_device_id *ent)
464{
465	int i;
466	int err;
467	int irq = 0;
468	int id;
469	char name[20];
470	enum pci_barno bar;
471	void __iomem *base;
472	struct device *dev = &pdev->dev;
473	struct pci_endpoint_test *test;
474	struct pci_endpoint_test_data *data;
475	enum pci_barno test_reg_bar = BAR_0;
476	struct miscdevice *misc_device;
477
478	if (pci_is_bridge(pdev))
479		return -ENODEV;
480
481	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
482	if (!test)
483		return -ENOMEM;
484
485	test->test_reg_bar = 0;
486	test->alignment = 0;
487	test->pdev = pdev;
488
489	data = (struct pci_endpoint_test_data *)ent->driver_data;
490	if (data) {
491		test_reg_bar = data->test_reg_bar;
492		test->alignment = data->alignment;
493		no_msi = data->no_msi;
494	}
495
496	init_completion(&test->irq_raised);
497	mutex_init(&test->mutex);
498
499	err = pci_enable_device(pdev);
500	if (err) {
501		dev_err(dev, "Cannot enable PCI device\n");
502		return err;
503	}
504
505	err = pci_request_regions(pdev, DRV_MODULE_NAME);
506	if (err) {
507		dev_err(dev, "Cannot obtain PCI resources\n");
508		goto err_disable_pdev;
509	}
510
511	pci_set_master(pdev);
512
513	if (!no_msi) {
514		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
515		if (irq < 0)
516			dev_err(dev, "failed to get MSI interrupts\n");
517		test->num_irqs = irq;
518	}
519
520	err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
521			       IRQF_SHARED, DRV_MODULE_NAME, test);
522	if (err) {
523		dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
524		goto err_disable_msi;
525	}
526
527	for (i = 1; i < irq; i++) {
528		err = devm_request_irq(dev, pdev->irq + i,
529				       pci_endpoint_test_irqhandler,
530				       IRQF_SHARED, DRV_MODULE_NAME, test);
531		if (err)
532			dev_err(dev, "failed to request IRQ %d for MSI %d\n",
533				pdev->irq + i, i + 1);
534	}
535
536	for (bar = BAR_0; bar <= BAR_5; bar++) {
537		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
538			base = pci_ioremap_bar(pdev, bar);
539			if (!base) {
540				dev_err(dev, "failed to read BAR%d\n", bar);
541				WARN_ON(bar == test_reg_bar);
542			}
543			test->bar[bar] = base;
544		}
545	}
546
547	test->base = test->bar[test_reg_bar];
548	if (!test->base) {
549		err = -ENOMEM;
550		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
551			test_reg_bar);
552		goto err_iounmap;
553	}
554
555	pci_set_drvdata(pdev, test);
556
557	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
558	if (id < 0) {
559		err = id;
560		dev_err(dev, "unable to get id\n");
561		goto err_iounmap;
562	}
563
564	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
565	misc_device = &test->miscdev;
566	misc_device->minor = MISC_DYNAMIC_MINOR;
567	misc_device->name = kstrdup(name, GFP_KERNEL);
568	if (!misc_device->name) {
569		err = -ENOMEM;
570		goto err_ida_remove;
571	}
 572	misc_device->fops = &pci_endpoint_test_fops;
573
574	err = misc_register(misc_device);
575	if (err) {
576		dev_err(dev, "failed to register device\n");
577		goto err_kfree_name;
578	}
579
580	return 0;
581
582err_kfree_name:
583	kfree(misc_device->name);
584
585err_ida_remove:
586	ida_simple_remove(&pci_endpoint_test_ida, id);
587
588err_iounmap:
589	for (bar = BAR_0; bar <= BAR_5; bar++) {
590		if (test->bar[bar])
591			pci_iounmap(pdev, test->bar[bar]);
592	}
593
594	for (i = 0; i < irq; i++)
595		devm_free_irq(dev, pdev->irq + i, test);
596
597err_disable_msi:
598	pci_disable_msi(pdev);
599	pci_release_regions(pdev);
600
601err_disable_pdev:
602	pci_disable_device(pdev);
603
604	return err;
605}
606
607static void pci_endpoint_test_remove(struct pci_dev *pdev)
608{
609	int id;
610	int i;
611	enum pci_barno bar;
612	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
613	struct miscdevice *misc_device = &test->miscdev;
614
615	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
616		return;
617	if (id < 0)
618		return;
619
620	misc_deregister(&test->miscdev);
621	kfree(misc_device->name);
622	ida_simple_remove(&pci_endpoint_test_ida, id);
623	for (bar = BAR_0; bar <= BAR_5; bar++) {
624		if (test->bar[bar])
625			pci_iounmap(pdev, test->bar[bar]);
626	}
627	for (i = 0; i < test->num_irqs; i++)
628		devm_free_irq(&pdev->dev, pdev->irq + i, test);
629	pci_disable_msi(pdev);
630	pci_release_regions(pdev);
631	pci_disable_device(pdev);
632}
633
634static const struct pci_device_id pci_endpoint_test_tbl[] = {
635	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
636	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
637	{ }
638};
639MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
640
641static struct pci_driver pci_endpoint_test_driver = {
642	.name		= DRV_MODULE_NAME,
643	.id_table	= pci_endpoint_test_tbl,
644	.probe		= pci_endpoint_test_probe,
645	.remove		= pci_endpoint_test_remove,
646};
647module_pci_driver(pci_endpoint_test_driver);
648
649MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
650MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
651MODULE_LICENSE("GPL v2");
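The v4.17 driver above is exercised entirely through the ioctls handled in pci_endpoint_test_ioctl(): the write/read/copy commands take a bare transfer size as the ioctl argument, and the return value is 1 on success, 0 on failure, or -EINVAL. Below is a minimal userspace sketch of that interface, not the in-tree tools/pci/pcitest.c utility; it assumes the miscdevice registered in probe appears as /dev/pci-endpoint-test.0 under the default devtmpfs naming and that the PCITEST_* request numbers come from the installed <linux/pcitest.h> uapi header included by the driver.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pcitest.h>

int main(void)
{
	unsigned long size = 0x400;	/* transfer size, passed directly as the ioctl argument */
	int fd;

	fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Each test returns 1 on success, 0 on failure, or -EINVAL from the handler. */
	printf("BAR0 test:   %d\n", ioctl(fd, PCITEST_BAR, 0));
	printf("Legacy IRQ:  %d\n", ioctl(fd, PCITEST_LEGACY_IRQ, 0));
	printf("MSI 1:       %d\n", ioctl(fd, PCITEST_MSI, 1));
	printf("Host write:  %d\n", ioctl(fd, PCITEST_WRITE, size));
	printf("Host read:   %d\n", ioctl(fd, PCITEST_READ, size));
	printf("Copy:        %d\n", ioctl(fd, PCITEST_COPY, size));

	close(fd);
	return 0;
}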
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/**
   3 * Host side test driver to test endpoint functionality
   4 *
   5 * Copyright (C) 2017 Texas Instruments
   6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
   7 */
   8
   9#include <linux/crc32.h>
  10#include <linux/delay.h>
  11#include <linux/fs.h>
  12#include <linux/io.h>
  13#include <linux/interrupt.h>
  14#include <linux/irq.h>
  15#include <linux/miscdevice.h>
  16#include <linux/module.h>
  17#include <linux/mutex.h>
  18#include <linux/random.h>
  19#include <linux/slab.h>
  20#include <linux/uaccess.h>
  21#include <linux/pci.h>
  22#include <linux/pci_ids.h>
  23
  24#include <linux/pci_regs.h>
  25
  26#include <uapi/linux/pcitest.h>
  27
  28#define DRV_MODULE_NAME				"pci-endpoint-test"
  29
  30#define IRQ_TYPE_UNDEFINED			-1
  31#define IRQ_TYPE_LEGACY				0
  32#define IRQ_TYPE_MSI				1
  33#define IRQ_TYPE_MSIX				2
  34
  35#define PCI_ENDPOINT_TEST_MAGIC			0x0
  36
  37#define PCI_ENDPOINT_TEST_COMMAND		0x4
  38#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
  39#define COMMAND_RAISE_MSI_IRQ			BIT(1)
  40#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
  41#define COMMAND_READ				BIT(3)
  42#define COMMAND_WRITE				BIT(4)
  43#define COMMAND_COPY				BIT(5)
  44
  45#define PCI_ENDPOINT_TEST_STATUS		0x8
  46#define STATUS_READ_SUCCESS			BIT(0)
  47#define STATUS_READ_FAIL			BIT(1)
  48#define STATUS_WRITE_SUCCESS			BIT(2)
  49#define STATUS_WRITE_FAIL			BIT(3)
  50#define STATUS_COPY_SUCCESS			BIT(4)
  51#define STATUS_COPY_FAIL			BIT(5)
  52#define STATUS_IRQ_RAISED			BIT(6)
  53#define STATUS_SRC_ADDR_INVALID			BIT(7)
  54#define STATUS_DST_ADDR_INVALID			BIT(8)
  55
  56#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
  57#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
  58
  59#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
  60#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
  61
  62#define PCI_ENDPOINT_TEST_SIZE			0x1c
  63#define PCI_ENDPOINT_TEST_CHECKSUM		0x20
  64
  65#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
  66#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28
  67
  68#define PCI_ENDPOINT_TEST_FLAGS			0x2c
  69#define FLAG_USE_DMA				BIT(0)
  70
  71#define PCI_DEVICE_ID_TI_AM654			0xb00c
  72#define PCI_DEVICE_ID_TI_J7200			0xb00f
  73#define PCI_DEVICE_ID_TI_AM64			0xb010
  74#define PCI_DEVICE_ID_LS1088A			0x80c0
  75
  76#define is_am654_pci_dev(pdev)		\
  77		((pdev)->device == PCI_DEVICE_ID_TI_AM654)
  78
  79#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
  80#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
  81#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
  82#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
  83
  84static DEFINE_IDA(pci_endpoint_test_ida);
  85
  86#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
  87					    miscdev)
  88
  89static bool no_msi;
  90module_param(no_msi, bool, 0444);
  91MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
  92
  93static int irq_type = IRQ_TYPE_MSI;
  94module_param(irq_type, int, 0444);
  95MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
  96
  97enum pci_barno {
  98	BAR_0,
  99	BAR_1,
 100	BAR_2,
 101	BAR_3,
 102	BAR_4,
 103	BAR_5,
 104};
 105
 106struct pci_endpoint_test {
 107	struct pci_dev	*pdev;
 108	void __iomem	*base;
 109	void __iomem	*bar[PCI_STD_NUM_BARS];
 110	struct completion irq_raised;
 111	int		last_irq;
 112	int		num_irqs;
 113	int		irq_type;
 114	/* mutex to protect the ioctls */
 115	struct mutex	mutex;
 116	struct miscdevice miscdev;
 117	enum pci_barno test_reg_bar;
 118	size_t alignment;
 119	const char *name;
 120};
 121
 122struct pci_endpoint_test_data {
 123	enum pci_barno test_reg_bar;
 124	size_t alignment;
 125	int irq_type;
 126};
 127
 128static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
 129					  u32 offset)
 130{
 131	return readl(test->base + offset);
 132}
 133
 134static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
 135					    u32 offset, u32 value)
 136{
 137	writel(value, test->base + offset);
 138}
 139
 140static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
 141					      int bar, int offset)
 142{
 143	return readl(test->bar[bar] + offset);
 144}
 145
 146static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
 147						int bar, u32 offset, u32 value)
 148{
 149	writel(value, test->bar[bar] + offset);
 150}
 151
 152static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
 153{
 154	struct pci_endpoint_test *test = dev_id;
 155	u32 reg;
 156
 157	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
 158	if (reg & STATUS_IRQ_RAISED) {
 159		test->last_irq = irq;
 160		complete(&test->irq_raised);
 161		reg &= ~STATUS_IRQ_RAISED;
 162	}
 163	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
 164				 reg);
 165
 166	return IRQ_HANDLED;
 167}
 168
 169static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
 170{
 171	struct pci_dev *pdev = test->pdev;
 172
 173	pci_free_irq_vectors(pdev);
 174	test->irq_type = IRQ_TYPE_UNDEFINED;
 175}
 176
 177static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
 178						int type)
 179{
 180	int irq = -1;
 181	struct pci_dev *pdev = test->pdev;
 182	struct device *dev = &pdev->dev;
 183	bool res = true;
 184
 185	switch (type) {
 186	case IRQ_TYPE_LEGACY:
 187		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
 188		if (irq < 0)
 189			dev_err(dev, "Failed to get Legacy interrupt\n");
 190		break;
 191	case IRQ_TYPE_MSI:
 192		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
 193		if (irq < 0)
 194			dev_err(dev, "Failed to get MSI interrupts\n");
 195		break;
 196	case IRQ_TYPE_MSIX:
 197		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
 198		if (irq < 0)
 199			dev_err(dev, "Failed to get MSI-X interrupts\n");
 200		break;
 201	default:
 202		dev_err(dev, "Invalid IRQ type selected\n");
 203	}
 204
 205	if (irq < 0) {
 206		irq = 0;
 207		res = false;
 208	}
 209
 210	test->irq_type = type;
 211	test->num_irqs = irq;
 212
 213	return res;
 214}
 215
 216static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
 217{
 218	int i;
 219	struct pci_dev *pdev = test->pdev;
 220	struct device *dev = &pdev->dev;
 221
 222	for (i = 0; i < test->num_irqs; i++)
 223		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
 224
 225	test->num_irqs = 0;
 226}
 227
 228static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
 229{
 230	int i;
 231	int err;
 232	struct pci_dev *pdev = test->pdev;
 233	struct device *dev = &pdev->dev;
 234
 235	for (i = 0; i < test->num_irqs; i++) {
 236		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
 237				       pci_endpoint_test_irqhandler,
 238				       IRQF_SHARED, test->name, test);
 239		if (err)
 240			goto fail;
 241	}
 242
 243	return true;
 244
 245fail:
 246	switch (irq_type) {
 247	case IRQ_TYPE_LEGACY:
 248		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
 249			pci_irq_vector(pdev, i));
 250		break;
 251	case IRQ_TYPE_MSI:
 252		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
 253			pci_irq_vector(pdev, i),
 254			i + 1);
 255		break;
 256	case IRQ_TYPE_MSIX:
 257		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
 258			pci_irq_vector(pdev, i),
 259			i + 1);
 260		break;
 261	}
 262
 263	return false;
 264}
 265
 266static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
 267				  enum pci_barno barno)
 268{
 269	int j;
 270	u32 val;
 271	int size;
 272	struct pci_dev *pdev = test->pdev;
 273
 274	if (!test->bar[barno])
 275		return false;
 276
 277	size = pci_resource_len(pdev, barno);
 278
 279	if (barno == test->test_reg_bar)
 280		size = 0x4;
 281
 282	for (j = 0; j < size; j += 4)
 283		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
 284
 285	for (j = 0; j < size; j += 4) {
 286		val = pci_endpoint_test_bar_readl(test, barno, j);
 287		if (val != 0xA0A0A0A0)
 288			return false;
 289	}
 290
 291	return true;
 292}
 293
 294static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
 295{
 296	u32 val;
 297
 298	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
 299				 IRQ_TYPE_LEGACY);
 300	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
 301	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 302				 COMMAND_RAISE_LEGACY_IRQ);
 303	val = wait_for_completion_timeout(&test->irq_raised,
 304					  msecs_to_jiffies(1000));
 305	if (!val)
 306		return false;
 307
 308	return true;
 309}
 310
 311static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
 312				       u16 msi_num, bool msix)
 313{
 314	u32 val;
 315	struct pci_dev *pdev = test->pdev;
 316
 317	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
 318				 msix == false ? IRQ_TYPE_MSI :
 319				 IRQ_TYPE_MSIX);
 320	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
 321	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 322				 msix == false ? COMMAND_RAISE_MSI_IRQ :
 323				 COMMAND_RAISE_MSIX_IRQ);
 324	val = wait_for_completion_timeout(&test->irq_raised,
 325					  msecs_to_jiffies(1000));
 326	if (!val)
 327		return false;
 328
 329	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
 330		return true;
 331
 332	return false;
 333}
 334
 335static int pci_endpoint_test_validate_xfer_params(struct device *dev,
 336		struct pci_endpoint_test_xfer_param *param, size_t alignment)
 337{
 338	if (!param->size) {
 339		dev_dbg(dev, "Data size is zero\n");
 340		return -EINVAL;
 341	}
 342
 343	if (param->size > SIZE_MAX - alignment) {
 344		dev_dbg(dev, "Maximum transfer data size exceeded\n");
 345		return -EINVAL;
 346	}
 347
 348	return 0;
 349}
 350
 351static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
 352				   unsigned long arg)
 353{
 354	struct pci_endpoint_test_xfer_param param;
 355	bool ret = false;
 356	void *src_addr;
 357	void *dst_addr;
 358	u32 flags = 0;
 359	bool use_dma;
 360	size_t size;
 361	dma_addr_t src_phys_addr;
 362	dma_addr_t dst_phys_addr;
 363	struct pci_dev *pdev = test->pdev;
 364	struct device *dev = &pdev->dev;
 365	void *orig_src_addr;
 366	dma_addr_t orig_src_phys_addr;
 367	void *orig_dst_addr;
 368	dma_addr_t orig_dst_phys_addr;
 369	size_t offset;
 370	size_t alignment = test->alignment;
 371	int irq_type = test->irq_type;
 372	u32 src_crc32;
 373	u32 dst_crc32;
 374	int err;
 375
 376	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
 377	if (err) {
 378		dev_err(dev, "Failed to get transfer param\n");
 379		return false;
 380	}
 381
 382	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
 383	if (err)
 384		return false;
 385
 386	size = param.size;
 387
 388	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
 389	if (use_dma)
 390		flags |= FLAG_USE_DMA;
 391
 392	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
 393		dev_err(dev, "Invalid IRQ type option\n");
 394		goto err;
 395	}
 396
 397	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
 398	if (!orig_src_addr) {
 399		dev_err(dev, "Failed to allocate source buffer\n");
 400		ret = false;
 401		goto err;
 402	}
 403
 404	get_random_bytes(orig_src_addr, size + alignment);
 405	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
 406					    size + alignment, DMA_TO_DEVICE);
 407	if (dma_mapping_error(dev, orig_src_phys_addr)) {
 408		dev_err(dev, "failed to map source buffer address\n");
 409		ret = false;
 410		goto err_src_phys_addr;
 411	}
 412
 413	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
 414		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
 415		offset = src_phys_addr - orig_src_phys_addr;
 416		src_addr = orig_src_addr + offset;
 417	} else {
 418		src_phys_addr = orig_src_phys_addr;
 419		src_addr = orig_src_addr;
 420	}
 421
 422	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
 423				 lower_32_bits(src_phys_addr));
 424
 425	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
 426				 upper_32_bits(src_phys_addr));
 427
 428	src_crc32 = crc32_le(~0, src_addr, size);
 429
 430	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
 431	if (!orig_dst_addr) {
 432		dev_err(dev, "Failed to allocate destination address\n");
 433		ret = false;
 434		goto err_dst_addr;
 435	}
 436
 437	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
 438					    size + alignment, DMA_FROM_DEVICE);
 439	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
 440		dev_err(dev, "failed to map destination buffer address\n");
 441		ret = false;
 442		goto err_dst_phys_addr;
 443	}
 444
 445	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
 446		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
 447		offset = dst_phys_addr - orig_dst_phys_addr;
 448		dst_addr = orig_dst_addr + offset;
 449	} else {
 450		dst_phys_addr = orig_dst_phys_addr;
 451		dst_addr = orig_dst_addr;
 452	}
 453
 454	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
 455				 lower_32_bits(dst_phys_addr));
 456	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
 457				 upper_32_bits(dst_phys_addr));
 458
 459	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
 460				 size);
 461
 462	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
 463	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
 464	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
 465	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 466				 COMMAND_COPY);
 467
 468	wait_for_completion(&test->irq_raised);
 469
 470	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
 471			 DMA_FROM_DEVICE);
 472
 473	dst_crc32 = crc32_le(~0, dst_addr, size);
 474	if (dst_crc32 == src_crc32)
 475		ret = true;
 476
 477err_dst_phys_addr:
 478	kfree(orig_dst_addr);
 479
 480err_dst_addr:
 481	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
 482			 DMA_TO_DEVICE);
 483
 484err_src_phys_addr:
 485	kfree(orig_src_addr);
 486
 487err:
 488	return ret;
 489}
 490
 491static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
 492				    unsigned long arg)
 493{
 494	struct pci_endpoint_test_xfer_param param;
 495	bool ret = false;
 496	u32 flags = 0;
 497	bool use_dma;
 498	u32 reg;
 499	void *addr;
 500	dma_addr_t phys_addr;
 501	struct pci_dev *pdev = test->pdev;
 502	struct device *dev = &pdev->dev;
 503	void *orig_addr;
 504	dma_addr_t orig_phys_addr;
 505	size_t offset;
 506	size_t alignment = test->alignment;
 507	int irq_type = test->irq_type;
 508	size_t size;
 509	u32 crc32;
 510	int err;
 511
 512	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
 513	if (err != 0) {
 514		dev_err(dev, "Failed to get transfer param\n");
 515		return false;
 516	}
 517
 518	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
 519	if (err)
 520		return false;
 521
 522	size = param.size;
 523
 524	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
 525	if (use_dma)
 526		flags |= FLAG_USE_DMA;
 527
 528	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
 529		dev_err(dev, "Invalid IRQ type option\n");
 530		goto err;
 531	}
 532
 533	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
 534	if (!orig_addr) {
 535		dev_err(dev, "Failed to allocate address\n");
 536		ret = false;
 537		goto err;
 538	}
 539
 540	get_random_bytes(orig_addr, size + alignment);
 541
 542	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
 543					DMA_TO_DEVICE);
 544	if (dma_mapping_error(dev, orig_phys_addr)) {
 545		dev_err(dev, "failed to map source buffer address\n");
 546		ret = false;
 547		goto err_phys_addr;
 548	}
 549
 550	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
 551		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
 552		offset = phys_addr - orig_phys_addr;
 553		addr = orig_addr + offset;
 554	} else {
 555		phys_addr = orig_phys_addr;
 556		addr = orig_addr;
 557	}
 558
 559	crc32 = crc32_le(~0, addr, size);
 560	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
 561				 crc32);
 562
 563	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
 564				 lower_32_bits(phys_addr));
 565	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
 566				 upper_32_bits(phys_addr));
 567
 568	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
 569
 570	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
 571	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
 572	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
 573	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 574				 COMMAND_READ);
 575
 576	wait_for_completion(&test->irq_raised);
 577
 578	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
 579	if (reg & STATUS_READ_SUCCESS)
 580		ret = true;
 581
 582	dma_unmap_single(dev, orig_phys_addr, size + alignment,
 583			 DMA_TO_DEVICE);
 584
 585err_phys_addr:
 586	kfree(orig_addr);
 587
 588err:
 589	return ret;
 590}
 591
 592static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
 593				   unsigned long arg)
 594{
 595	struct pci_endpoint_test_xfer_param param;
 596	bool ret = false;
 597	u32 flags = 0;
 598	bool use_dma;
 599	size_t size;
 600	void *addr;
 601	dma_addr_t phys_addr;
 602	struct pci_dev *pdev = test->pdev;
 603	struct device *dev = &pdev->dev;
 604	void *orig_addr;
 605	dma_addr_t orig_phys_addr;
 606	size_t offset;
 607	size_t alignment = test->alignment;
 608	int irq_type = test->irq_type;
 609	u32 crc32;
 610	int err;
 611
 612	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
 613	if (err) {
 614		dev_err(dev, "Failed to get transfer param\n");
 615		return false;
 616	}
 617
 618	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
 619	if (err)
 620		return false;
 621
 622	size = param.size;
 623
 624	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
 625	if (use_dma)
 626		flags |= FLAG_USE_DMA;
 627
 628	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
 629		dev_err(dev, "Invalid IRQ type option\n");
 630		goto err;
 631	}
 632
 633	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
 634	if (!orig_addr) {
 635		dev_err(dev, "Failed to allocate destination address\n");
 636		ret = false;
 637		goto err;
 638	}
 639
 640	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
 641					DMA_FROM_DEVICE);
 642	if (dma_mapping_error(dev, orig_phys_addr)) {
 643		dev_err(dev, "failed to map source buffer address\n");
 644		ret = false;
 645		goto err_phys_addr;
 646	}
 647
 648	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
 649		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
 650		offset = phys_addr - orig_phys_addr;
 651		addr = orig_addr + offset;
 652	} else {
 653		phys_addr = orig_phys_addr;
 654		addr = orig_addr;
 655	}
 656
 657	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
 658				 lower_32_bits(phys_addr));
 659	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
 660				 upper_32_bits(phys_addr));
 661
 662	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
 663
 664	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
 665	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
 666	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
 667	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
 668				 COMMAND_WRITE);
 669
 670	wait_for_completion(&test->irq_raised);
 671
 672	dma_unmap_single(dev, orig_phys_addr, size + alignment,
 673			 DMA_FROM_DEVICE);
 674
 675	crc32 = crc32_le(~0, addr, size);
 676	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
 677		ret = true;
 678
 679err_phys_addr:
 680	kfree(orig_addr);
 681err:
 682	return ret;
 683}
 684
 685static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
 686{
 687	pci_endpoint_test_release_irq(test);
 688	pci_endpoint_test_free_irq_vectors(test);
 689	return true;
 690}
 691
 692static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
 693				      int req_irq_type)
 694{
 695	struct pci_dev *pdev = test->pdev;
 696	struct device *dev = &pdev->dev;
 697
 698	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
 699		dev_err(dev, "Invalid IRQ type option\n");
 700		return false;
 701	}
 702
 703	if (test->irq_type == req_irq_type)
 704		return true;
 705
 706	pci_endpoint_test_release_irq(test);
 707	pci_endpoint_test_free_irq_vectors(test);
 708
 709	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
 710		goto err;
 711
 712	if (!pci_endpoint_test_request_irq(test))
 713		goto err;
 714
 715	return true;
 716
 717err:
 718	pci_endpoint_test_free_irq_vectors(test);
 719	return false;
 720}
 721
 722static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
 723				    unsigned long arg)
 724{
 725	int ret = -EINVAL;
 726	enum pci_barno bar;
 727	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
 728	struct pci_dev *pdev = test->pdev;
 729
 730	mutex_lock(&test->mutex);
 731	switch (cmd) {
 732	case PCITEST_BAR:
 733		bar = arg;
 734		if (bar > BAR_5)
 735			goto ret;
 736		if (is_am654_pci_dev(pdev) && bar == BAR_0)
 737			goto ret;
 738		ret = pci_endpoint_test_bar(test, bar);
 739		break;
 740	case PCITEST_LEGACY_IRQ:
 741		ret = pci_endpoint_test_legacy_irq(test);
 742		break;
 743	case PCITEST_MSI:
 744	case PCITEST_MSIX:
 745		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
 746		break;
 747	case PCITEST_WRITE:
 748		ret = pci_endpoint_test_write(test, arg);
 749		break;
 750	case PCITEST_READ:
 751		ret = pci_endpoint_test_read(test, arg);
 752		break;
 753	case PCITEST_COPY:
 754		ret = pci_endpoint_test_copy(test, arg);
 755		break;
 756	case PCITEST_SET_IRQTYPE:
 757		ret = pci_endpoint_test_set_irq(test, arg);
 758		break;
 759	case PCITEST_GET_IRQTYPE:
 760		ret = irq_type;
 761		break;
 762	case PCITEST_CLEAR_IRQ:
 763		ret = pci_endpoint_test_clear_irq(test);
 764		break;
 765	}
 766
 767ret:
 768	mutex_unlock(&test->mutex);
 769	return ret;
 770}
 771
 772static const struct file_operations pci_endpoint_test_fops = {
 773	.owner = THIS_MODULE,
 774	.unlocked_ioctl = pci_endpoint_test_ioctl,
 775};
 776
 777static int pci_endpoint_test_probe(struct pci_dev *pdev,
 778				   const struct pci_device_id *ent)
 779{
 780	int err;
 781	int id;
 782	char name[24];
 783	enum pci_barno bar;
 784	void __iomem *base;
 785	struct device *dev = &pdev->dev;
 786	struct pci_endpoint_test *test;
 787	struct pci_endpoint_test_data *data;
 788	enum pci_barno test_reg_bar = BAR_0;
 789	struct miscdevice *misc_device;
 790
 791	if (pci_is_bridge(pdev))
 792		return -ENODEV;
 793
 794	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
 795	if (!test)
 796		return -ENOMEM;
 797
 798	test->test_reg_bar = 0;
 799	test->alignment = 0;
 800	test->pdev = pdev;
 801	test->irq_type = IRQ_TYPE_UNDEFINED;
 802
 803	if (no_msi)
 804		irq_type = IRQ_TYPE_LEGACY;
 805
 806	data = (struct pci_endpoint_test_data *)ent->driver_data;
 807	if (data) {
 808		test_reg_bar = data->test_reg_bar;
 809		test->test_reg_bar = test_reg_bar;
 810		test->alignment = data->alignment;
 811		irq_type = data->irq_type;
 812	}
 813
 814	init_completion(&test->irq_raised);
 815	mutex_init(&test->mutex);
 816
 817	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
 818	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 819		dev_err(dev, "Cannot set DMA mask\n");
 820		return -EINVAL;
 821	}
 822
 823	err = pci_enable_device(pdev);
 824	if (err) {
 825		dev_err(dev, "Cannot enable PCI device\n");
 826		return err;
 827	}
 828
 829	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 830	if (err) {
 831		dev_err(dev, "Cannot obtain PCI resources\n");
 832		goto err_disable_pdev;
 833	}
 834
 835	pci_set_master(pdev);
 836
 837	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
 838		err = -EINVAL;
 839		goto err_disable_irq;
 840	}
 841
 842	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 843		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
 844			base = pci_ioremap_bar(pdev, bar);
 845			if (!base) {
 846				dev_err(dev, "Failed to read BAR%d\n", bar);
 847				WARN_ON(bar == test_reg_bar);
 848			}
 849			test->bar[bar] = base;
 850		}
 851	}
 852
 853	test->base = test->bar[test_reg_bar];
 854	if (!test->base) {
 855		err = -ENOMEM;
 856		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
 857			test_reg_bar);
 858		goto err_iounmap;
 859	}
 860
 861	pci_set_drvdata(pdev, test);
 862
 863	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
 864	if (id < 0) {
 865		err = id;
 866		dev_err(dev, "Unable to get id\n");
 867		goto err_iounmap;
 868	}
 869
 870	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
 871	test->name = kstrdup(name, GFP_KERNEL);
 872	if (!test->name) {
 873		err = -ENOMEM;
 874		goto err_ida_remove;
 875	}
 876
 877	if (!pci_endpoint_test_request_irq(test)) {
 878		err = -EINVAL;
 879		goto err_kfree_test_name;
 880	}
 881
 882	misc_device = &test->miscdev;
 883	misc_device->minor = MISC_DYNAMIC_MINOR;
 884	misc_device->name = kstrdup(name, GFP_KERNEL);
 885	if (!misc_device->name) {
 886		err = -ENOMEM;
 887		goto err_release_irq;
 888	}
 889	misc_device->parent = &pdev->dev;
 890	misc_device->fops = &pci_endpoint_test_fops;
 891
 892	err = misc_register(misc_device);
 893	if (err) {
 894		dev_err(dev, "Failed to register device\n");
 895		goto err_kfree_name;
 896	}
 897
 898	return 0;
 899
 900err_kfree_name:
 901	kfree(misc_device->name);
 902
 903err_release_irq:
 904	pci_endpoint_test_release_irq(test);
 905
 906err_kfree_test_name:
 907	kfree(test->name);
 908
 909err_ida_remove:
 910	ida_simple_remove(&pci_endpoint_test_ida, id);
 911
 912err_iounmap:
 913	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 914		if (test->bar[bar])
 915			pci_iounmap(pdev, test->bar[bar]);
 916	}
 917
 918err_disable_irq:
 919	pci_endpoint_test_free_irq_vectors(test);
 920	pci_release_regions(pdev);
 921
 922err_disable_pdev:
 923	pci_disable_device(pdev);
 924
 925	return err;
 926}
 927
 928static void pci_endpoint_test_remove(struct pci_dev *pdev)
 929{
 930	int id;
 931	enum pci_barno bar;
 932	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
 933	struct miscdevice *misc_device = &test->miscdev;
 934
 935	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
 936		return;
 937	if (id < 0)
 938		return;
 939
 940	misc_deregister(&test->miscdev);
 941	kfree(misc_device->name);
 942	kfree(test->name);
 943	ida_simple_remove(&pci_endpoint_test_ida, id);
 944	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 945		if (test->bar[bar])
 946			pci_iounmap(pdev, test->bar[bar]);
 947	}
 948
 949	pci_endpoint_test_release_irq(test);
 950	pci_endpoint_test_free_irq_vectors(test);
 951
 952	pci_release_regions(pdev);
 953	pci_disable_device(pdev);
 954}
 955
 956static const struct pci_endpoint_test_data default_data = {
 957	.test_reg_bar = BAR_0,
 958	.alignment = SZ_4K,
 959	.irq_type = IRQ_TYPE_MSI,
 960};
 961
 962static const struct pci_endpoint_test_data am654_data = {
 963	.test_reg_bar = BAR_2,
 964	.alignment = SZ_64K,
 965	.irq_type = IRQ_TYPE_MSI,
 966};
 967
 968static const struct pci_endpoint_test_data j721e_data = {
 969	.alignment = 256,
 970	.irq_type = IRQ_TYPE_MSI,
 971};
 972
 973static const struct pci_device_id pci_endpoint_test_tbl[] = {
 974	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
 975	  .driver_data = (kernel_ulong_t)&default_data,
 976	},
 977	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
 978	  .driver_data = (kernel_ulong_t)&default_data,
 979	},
 980	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
 981	  .driver_data = (kernel_ulong_t)&default_data,
 982	},
 983	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
 984	  .driver_data = (kernel_ulong_t)&default_data,
 985	},
 986	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
 987	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
 988	  .driver_data = (kernel_ulong_t)&am654_data
 989	},
 990	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
 991	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
 992	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
 993	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
 994	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
 995	  .driver_data = (kernel_ulong_t)&j721e_data,
 996	},
 997	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
 998	  .driver_data = (kernel_ulong_t)&j721e_data,
 999	},
1000	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
1001	  .driver_data = (kernel_ulong_t)&j721e_data,
1002	},
1003	{ }
1004};
1005MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1006
1007static struct pci_driver pci_endpoint_test_driver = {
1008	.name		= DRV_MODULE_NAME,
1009	.id_table	= pci_endpoint_test_tbl,
1010	.probe		= pci_endpoint_test_probe,
1011	.remove		= pci_endpoint_test_remove,
1012	.sriov_configure = pci_sriov_configure_simple,
1013};
1014module_pci_driver(pci_endpoint_test_driver);
1015
1016MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
1017MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1018MODULE_LICENSE("GPL v2");
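By v6.2 the read/write/copy ioctls no longer take a bare size: pci_endpoint_test_copy(), _write() and _read() copy_from_user() a struct pci_endpoint_test_xfer_param, DMA can be requested per transfer via PCITEST_FLAGS_USE_DMA, and the interrupt type is selected at run time through PCITEST_SET_IRQTYPE. A companion sketch of the newer calling convention follows; it again assumes the /dev/pci-endpoint-test.0 node and that the installed <linux/pcitest.h> header declares pci_endpoint_test_xfer_param with the size and flags fields dereferenced by the driver above, plus the PCITEST_FLAGS_USE_DMA flag.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pcitest.h>

int main(void)
{
	struct pci_endpoint_test_xfer_param param = {
		.size  = 0x100000,		/* 1 MiB transfer */
		.flags = PCITEST_FLAGS_USE_DMA,	/* ask the endpoint to use its DMA engine */
	};
	int fd;

	fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* IRQ types follow the driver's own defines: 0 legacy, 1 MSI, 2 MSI-X. */
	printf("SET_IRQTYPE: %d\n", ioctl(fd, PCITEST_SET_IRQTYPE, 2));
	printf("GET_IRQTYPE: %d\n", ioctl(fd, PCITEST_GET_IRQTYPE, 0));
	printf("MSI-X 16:    %d\n", ioctl(fd, PCITEST_MSIX, 16));
	printf("Copy (DMA):  %d\n", ioctl(fd, PCITEST_COPY, &param));

	close(fd);
	return 0;
}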