// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME "pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_INTX 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2

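/*
 * Register layout of the test BAR exposed by the companion endpoint test
 * function (pci-epf-test): command/status, 64-bit source and destination
 * addresses, transfer size, CRC32 checksum, IRQ type/number selection and
 * a flags word (FLAG_USE_DMA asks the endpoint to use its DMA engine).
 */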
#define PCI_ENDPOINT_TEST_MAGIC 0x0

#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_INTX_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)

#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28

#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)

#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808

#define is_am654_pci_dev(pdev) \
	((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

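/*
 * Per-device state: ioremapped BARs, the completion signalled by the IRQ
 * handler, the currently configured IRQ type and vector count, and a mutex
 * serialising the ioctl handlers.
 */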
struct pci_endpoint_test {
	struct pci_dev *pdev;
	void __iomem *base;
	void __iomem *bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int last_irq;
	int num_irqs;
	int irq_type;
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

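/*
 * Shared handler for every allocated vector: when the endpoint reports
 * STATUS_IRQ_RAISED, record which Linux IRQ fired and wake up the ioctl
 * waiting on irq_raised.
 */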
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

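/*
 * Allocate vectors for the requested interrupt type: one INTX vector, up to
 * 32 MSI vectors or up to 2048 MSI-X vectors. The count actually granted by
 * the PCI core is stored in test->num_irqs.
 */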
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

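/*
 * BAR test: fill the BAR with a per-BAR pattern and read it back. For the
 * BAR holding the test registers only the first dword is exercised.
 */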
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j,
					     bar_test_pattern[barno]);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != bar_test_pattern[barno])
			return false;
	}

	return true;
}

static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

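/*
 * MSI/MSI-X test: program the interrupt type and (1-based) vector number,
 * ask the endpoint to raise it and wait up to one second. The test passes
 * only if the vector that fired matches the one requested.
 */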
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

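/*
 * COPY test: map a random source buffer and a destination buffer, program
 * both bus addresses plus size and flags, issue COMMAND_COPY and wait for
 * the completion interrupt. Success means the CRC32 of the destination
 * matches that of the source.
 */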
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

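/*
 * WRITE test (host memory read by the endpoint): fill a buffer with random
 * data, publish its CRC32 in the checksum register, program the buffer as
 * the source address and issue COMMAND_READ; the endpoint is expected to
 * read the buffer and set STATUS_READ_SUCCESS when its checksum matches.
 */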
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

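/*
 * READ test (host memory written by the endpoint): program a destination
 * buffer and issue COMMAND_WRITE; the endpoint fills the buffer and is
 * expected to leave the matching CRC32 in the checksum register, which is
 * compared against the CRC32 computed over the received data.
 */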
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

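/*
 * Switch the interrupt type at runtime: release and free the current
 * vectors, then allocate and request vectors of the requested type.
 */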
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

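/*
 * ioctl dispatcher for /dev/pci-endpoint-test.<id>, typically driven by the
 * pcitest utility (tools/pci/pcitest.c). Every command runs under
 * test->mutex, with the completion and last_irq state reset first.
 */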
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

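/*
 * Probe: set a 48-bit DMA mask (falling back to 32-bit), enable the device,
 * map the memory BARs, allocate vectors of the configured IRQ type and
 * register a misc device named pci-endpoint-test.<id>.
 */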
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");