// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
8
9#include <linux/crc32.h>
10#include <linux/delay.h>
11#include <linux/io.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/pci_ids.h>
15#include <linux/random.h>
16
17#include <linux/pci-epc.h>
18#include <linux/pci-epf.h>
19#include <linux/pci_regs.h>
20
21#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
22#define COMMAND_RAISE_MSI_IRQ BIT(1)
23#define MSI_NUMBER_SHIFT 2
24#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
25#define COMMAND_READ BIT(8)
26#define COMMAND_WRITE BIT(9)
27#define COMMAND_COPY BIT(10)
28
29#define STATUS_READ_SUCCESS BIT(0)
30#define STATUS_READ_FAIL BIT(1)
31#define STATUS_WRITE_SUCCESS BIT(2)
32#define STATUS_WRITE_FAIL BIT(3)
33#define STATUS_COPY_SUCCESS BIT(4)
34#define STATUS_COPY_FAIL BIT(5)
35#define STATUS_IRQ_RAISED BIT(6)
36#define STATUS_SRC_ADDR_INVALID BIT(7)
37#define STATUS_DST_ADDR_INVALID BIT(8)
38
39#define TIMER_RESOLUTION 1
40
41static struct workqueue_struct *kpcitest_workqueue;
42
/* Per-function private state of the endpoint test function driver. */
struct pci_epf_test {
	void			*reg[6];	/* kernel-side base of each BAR's backing space (6 standard BARs) */
	struct pci_epf		*epf;		/* back-pointer to the owning endpoint function */
	enum pci_barno		test_reg_bar;	/* BAR that holds struct pci_epf_test_reg */
	bool			linkup_notifier; /* true: EPC signals link-up; false: start polling at bind */
	struct delayed_work	cmd_handler;	/* periodically polls reg->command written by the host */
};
50
/*
 * Register layout shared with the host at the start of test_reg_bar.
 * __packed so both sides agree on the field offsets.
 */
struct pci_epf_test_reg {
	u32	magic;		/* identification value checked by the host */
	u32	command;	/* COMMAND_* written by the host; cleared when accepted */
	u32	status;		/* STATUS_* flags reported back to the host */
	u64	src_addr;	/* host (PCI bus) source address */
	u64	dst_addr;	/* host (PCI bus) destination address */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* CRC32 (crc32_le seeded with ~0) over the payload */
} __packed;
60
/* Configuration-space header template written into the EPC at bind time. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
67
/* Per-device-ID configuration, passed via pci_epf_device_id::driver_data. */
struct pci_epf_test_data {
	enum pci_barno	test_reg_bar;	/* which BAR carries the register block */
	bool		linkup_notifier; /* whether the EPC provides a link-up callback */
};
72
/* Default size, in bytes, of the space backing BAR0..BAR5. */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
74
75static int pci_epf_test_copy(struct pci_epf_test *epf_test)
76{
77 int ret;
78 void __iomem *src_addr;
79 void __iomem *dst_addr;
80 phys_addr_t src_phys_addr;
81 phys_addr_t dst_phys_addr;
82 struct pci_epf *epf = epf_test->epf;
83 struct device *dev = &epf->dev;
84 struct pci_epc *epc = epf->epc;
85 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
86 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
87
88 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
89 if (!src_addr) {
90 dev_err(dev, "failed to allocate source address\n");
91 reg->status = STATUS_SRC_ADDR_INVALID;
92 ret = -ENOMEM;
93 goto err;
94 }
95
96 ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
97 reg->size);
98 if (ret) {
99 dev_err(dev, "failed to map source address\n");
100 reg->status = STATUS_SRC_ADDR_INVALID;
101 goto err_src_addr;
102 }
103
104 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
105 if (!dst_addr) {
106 dev_err(dev, "failed to allocate destination address\n");
107 reg->status = STATUS_DST_ADDR_INVALID;
108 ret = -ENOMEM;
109 goto err_src_map_addr;
110 }
111
112 ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
113 reg->size);
114 if (ret) {
115 dev_err(dev, "failed to map destination address\n");
116 reg->status = STATUS_DST_ADDR_INVALID;
117 goto err_dst_addr;
118 }
119
120 memcpy(dst_addr, src_addr, reg->size);
121
122 pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
123
124err_dst_addr:
125 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
126
127err_src_map_addr:
128 pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
129
130err_src_addr:
131 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
132
133err:
134 return ret;
135}
136
137static int pci_epf_test_read(struct pci_epf_test *epf_test)
138{
139 int ret;
140 void __iomem *src_addr;
141 void *buf;
142 u32 crc32;
143 phys_addr_t phys_addr;
144 struct pci_epf *epf = epf_test->epf;
145 struct device *dev = &epf->dev;
146 struct pci_epc *epc = epf->epc;
147 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
148 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
149
150 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
151 if (!src_addr) {
152 dev_err(dev, "failed to allocate address\n");
153 reg->status = STATUS_SRC_ADDR_INVALID;
154 ret = -ENOMEM;
155 goto err;
156 }
157
158 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
159 reg->size);
160 if (ret) {
161 dev_err(dev, "failed to map address\n");
162 reg->status = STATUS_SRC_ADDR_INVALID;
163 goto err_addr;
164 }
165
166 buf = kzalloc(reg->size, GFP_KERNEL);
167 if (!buf) {
168 ret = -ENOMEM;
169 goto err_map_addr;
170 }
171
172 memcpy(buf, src_addr, reg->size);
173
174 crc32 = crc32_le(~0, buf, reg->size);
175 if (crc32 != reg->checksum)
176 ret = -EIO;
177
178 kfree(buf);
179
180err_map_addr:
181 pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
182
183err_addr:
184 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
185
186err:
187 return ret;
188}
189
190static int pci_epf_test_write(struct pci_epf_test *epf_test)
191{
192 int ret;
193 void __iomem *dst_addr;
194 void *buf;
195 phys_addr_t phys_addr;
196 struct pci_epf *epf = epf_test->epf;
197 struct device *dev = &epf->dev;
198 struct pci_epc *epc = epf->epc;
199 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
200 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
201
202 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
203 if (!dst_addr) {
204 dev_err(dev, "failed to allocate address\n");
205 reg->status = STATUS_DST_ADDR_INVALID;
206 ret = -ENOMEM;
207 goto err;
208 }
209
210 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
211 reg->size);
212 if (ret) {
213 dev_err(dev, "failed to map address\n");
214 reg->status = STATUS_DST_ADDR_INVALID;
215 goto err_addr;
216 }
217
218 buf = kzalloc(reg->size, GFP_KERNEL);
219 if (!buf) {
220 ret = -ENOMEM;
221 goto err_map_addr;
222 }
223
224 get_random_bytes(buf, reg->size);
225 reg->checksum = crc32_le(~0, buf, reg->size);
226
227 memcpy(dst_addr, buf, reg->size);
228
229 /*
230 * wait 1ms inorder for the write to complete. Without this delay L3
231 * error in observed in the host system.
232 */
233 mdelay(1);
234
235 kfree(buf);
236
237err_map_addr:
238 pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
239
240err_addr:
241 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
242
243err:
244 return ret;
245}
246
247static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
248{
249 u8 msi_count;
250 struct pci_epf *epf = epf_test->epf;
251 struct pci_epc *epc = epf->epc;
252 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
253 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
254
255 reg->status |= STATUS_IRQ_RAISED;
256 msi_count = pci_epc_get_msi(epc, epf->func_no);
257 if (irq > msi_count || msi_count <= 0)
258 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
259 else
260 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
261}
262
263static void pci_epf_test_cmd_handler(struct work_struct *work)
264{
265 int ret;
266 u8 irq;
267 u8 msi_count;
268 u32 command;
269 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
270 cmd_handler.work);
271 struct pci_epf *epf = epf_test->epf;
272 struct pci_epc *epc = epf->epc;
273 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
274 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
275
276 command = reg->command;
277 if (!command)
278 goto reset_handler;
279
280 reg->command = 0;
281 reg->status = 0;
282
283 irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
284
285 if (command & COMMAND_RAISE_LEGACY_IRQ) {
286 reg->status = STATUS_IRQ_RAISED;
287 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
288 goto reset_handler;
289 }
290
291 if (command & COMMAND_WRITE) {
292 ret = pci_epf_test_write(epf_test);
293 if (ret)
294 reg->status |= STATUS_WRITE_FAIL;
295 else
296 reg->status |= STATUS_WRITE_SUCCESS;
297 pci_epf_test_raise_irq(epf_test, irq);
298 goto reset_handler;
299 }
300
301 if (command & COMMAND_READ) {
302 ret = pci_epf_test_read(epf_test);
303 if (!ret)
304 reg->status |= STATUS_READ_SUCCESS;
305 else
306 reg->status |= STATUS_READ_FAIL;
307 pci_epf_test_raise_irq(epf_test, irq);
308 goto reset_handler;
309 }
310
311 if (command & COMMAND_COPY) {
312 ret = pci_epf_test_copy(epf_test);
313 if (!ret)
314 reg->status |= STATUS_COPY_SUCCESS;
315 else
316 reg->status |= STATUS_COPY_FAIL;
317 pci_epf_test_raise_irq(epf_test, irq);
318 goto reset_handler;
319 }
320
321 if (command & COMMAND_RAISE_MSI_IRQ) {
322 msi_count = pci_epc_get_msi(epc, epf->func_no);
323 if (irq > msi_count || msi_count <= 0)
324 goto reset_handler;
325 reg->status = STATUS_IRQ_RAISED;
326 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
327 goto reset_handler;
328 }
329
330reset_handler:
331 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
332 msecs_to_jiffies(1));
333}
334
335static void pci_epf_test_linkup(struct pci_epf *epf)
336{
337 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
338
339 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
340 msecs_to_jiffies(1));
341}
342
343static void pci_epf_test_unbind(struct pci_epf *epf)
344{
345 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
346 struct pci_epc *epc = epf->epc;
347 struct pci_epf_bar *epf_bar;
348 int bar;
349
350 cancel_delayed_work(&epf_test->cmd_handler);
351 pci_epc_stop(epc);
352 for (bar = BAR_0; bar <= BAR_5; bar++) {
353 epf_bar = &epf->bar[bar];
354
355 if (epf_test->reg[bar]) {
356 pci_epf_free_space(epf, epf_test->reg[bar], bar);
357 pci_epc_clear_bar(epc, epf->func_no, epf_bar);
358 }
359 }
360}
361
/*
 * pci_epf_test_set_bar() - program each BAR of the function into the EPC
 * @epf: the endpoint function whose BARs are being programmed
 *
 * A failure on the register BAR is fatal; a failure on any other BAR is
 * logged, its backing space freed, and the remaining BARs still set.
 *
 * Returns 0 on success or the errno from a failed register-BAR setup.
 */
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = BAR_0; bar <= BAR_5; bar++) {
		epf_bar = &epf->bar[bar];

		/* BARs whose size exceeds 32 bits need a 64-bit BAR pair. */
		epf_bar->flags |= upper_32_bits(epf_bar->size) ?
			PCI_BASE_ADDRESS_MEM_TYPE_64 :
			PCI_BASE_ADDRESS_MEM_TYPE_32;

		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
			dev_err(dev, "failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 */
		if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
			bar++;	/* a 64-bit BAR also consumes the next slot */
	}

	return 0;
}
397
398static int pci_epf_test_alloc_space(struct pci_epf *epf)
399{
400 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
401 struct device *dev = &epf->dev;
402 void *base;
403 int bar;
404 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
405
406 base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
407 test_reg_bar);
408 if (!base) {
409 dev_err(dev, "failed to allocated register space\n");
410 return -ENOMEM;
411 }
412 epf_test->reg[test_reg_bar] = base;
413
414 for (bar = BAR_0; bar <= BAR_5; bar++) {
415 if (bar == test_reg_bar)
416 continue;
417 base = pci_epf_alloc_space(epf, bar_size[bar], bar);
418 if (!base)
419 dev_err(dev, "failed to allocate space for BAR%d\n",
420 bar);
421 epf_test->reg[bar] = base;
422 }
423
424 return 0;
425}
426
/*
 * pci_epf_test_bind() - EPF core callback: program the configuration
 * header, back and program the BARs, configure MSI, and (when the EPC
 * has no link-up notifier) start the command poller immediately.
 * @epf: the endpoint function being bound to an EPC
 *
 * Returns 0 on success or a negative errno.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;

	/* bind must not run before the EPF is attached to an EPC */
	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
	if (ret)
		return ret;

	/* No link-up callback available: start polling right away. */
	if (!epf_test->linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
461
/* Device-ID table for this endpoint function driver (matched by name). */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},	/* sentinel */
};
468
469static int pci_epf_test_probe(struct pci_epf *epf)
470{
471 struct pci_epf_test *epf_test;
472 struct device *dev = &epf->dev;
473 const struct pci_epf_device_id *match;
474 struct pci_epf_test_data *data;
475 enum pci_barno test_reg_bar = BAR_0;
476 bool linkup_notifier = true;
477
478 match = pci_epf_match_device(pci_epf_test_ids, epf);
479 data = (struct pci_epf_test_data *)match->driver_data;
480 if (data) {
481 test_reg_bar = data->test_reg_bar;
482 linkup_notifier = data->linkup_notifier;
483 }
484
485 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
486 if (!epf_test)
487 return -ENOMEM;
488
489 epf->header = &test_header;
490 epf_test->epf = epf;
491 epf_test->test_reg_bar = test_reg_bar;
492 epf_test->linkup_notifier = linkup_notifier;
493
494 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
495
496 epf_set_drvdata(epf, epf_test);
497 return 0;
498}
499
/* EPF core callbacks implemented by this driver. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.linkup	= pci_epf_test_linkup,
};
505
/* Endpoint function driver registration record. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
513
514static int __init pci_epf_test_init(void)
515{
516 int ret;
517
518 kpcitest_workqueue = alloc_workqueue("kpcitest",
519 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
520 ret = pci_epf_register_driver(&test_driver);
521 if (ret) {
522 pr_err("failed to register pci epf test driver --> %d\n", ret);
523 return ret;
524 }
525
526 return 0;
527}
528module_init(pci_epf_test_init);
529
530static void __exit pci_epf_test_exit(void)
531{
532 pci_epf_unregister_driver(&test_driver);
533}
534module_exit(pci_epf_test_exit);
535
536MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
537MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
538MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Test driver to test endpoint functionality
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 */
8
9#include <linux/crc32.h>
10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/pci_ids.h>
16#include <linux/random.h>
17
18#include <linux/pci-epc.h>
19#include <linux/pci-epf.h>
20#include <linux/pci_regs.h>
21
22#define IRQ_TYPE_INTX 0
23#define IRQ_TYPE_MSI 1
24#define IRQ_TYPE_MSIX 2
25
26#define COMMAND_RAISE_INTX_IRQ BIT(0)
27#define COMMAND_RAISE_MSI_IRQ BIT(1)
28#define COMMAND_RAISE_MSIX_IRQ BIT(2)
29#define COMMAND_READ BIT(3)
30#define COMMAND_WRITE BIT(4)
31#define COMMAND_COPY BIT(5)
32
33#define STATUS_READ_SUCCESS BIT(0)
34#define STATUS_READ_FAIL BIT(1)
35#define STATUS_WRITE_SUCCESS BIT(2)
36#define STATUS_WRITE_FAIL BIT(3)
37#define STATUS_COPY_SUCCESS BIT(4)
38#define STATUS_COPY_FAIL BIT(5)
39#define STATUS_IRQ_RAISED BIT(6)
40#define STATUS_SRC_ADDR_INVALID BIT(7)
41#define STATUS_DST_ADDR_INVALID BIT(8)
42
43#define FLAG_USE_DMA BIT(0)
44
45#define TIMER_RESOLUTION 1
46
47static struct workqueue_struct *kpcitest_workqueue;
48
/* Per-function private state of the endpoint test function driver. */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* kernel va of each BAR's backing space */
	struct pci_epf		*epf;			/* back-pointer to the owning endpoint function */
	enum pci_barno		test_reg_bar;		/* BAR that holds struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* offset within test_reg_bar — presumably the MSI-X table; confirm against bind code */
	struct delayed_work	cmd_handler;		/* periodically polls reg->command */
	struct dma_chan		*dma_chan_tx;		/* MEM_TO_DEV channel (may alias dma_chan_rx) */
	struct dma_chan		*dma_chan_rx;		/* DEV_TO_MEM channel */
	struct dma_chan		*transfer_chan;		/* channel of the in-flight transfer */
	dma_cookie_t		transfer_cookie;	/* cookie of the in-flight transfer */
	enum dma_status		transfer_status;	/* result recorded by the DMA callback */
	struct completion	transfer_complete;	/* signalled when the transfer finishes/fails */
	bool			dma_supported;		/* a usable DMA channel was found */
	bool			dma_private;		/* channels are EPC-private slave channels */
	const struct pci_epc_features *epc_features;	/* capabilities of the backing EPC */
};
65
/*
 * Register layout shared with the host at the start of test_reg_bar.
 * __packed so both sides agree on the field offsets.
 */
struct pci_epf_test_reg {
	u32	magic;		/* identification value checked by the host */
	u32	command;	/* COMMAND_* written by the host; cleared when accepted */
	u32	status;		/* STATUS_* flags reported back to the host */
	u64	src_addr;	/* host (PCI bus) source address */
	u64	dst_addr;	/* host (PCI bus) destination address */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* CRC32 (crc32_le seeded with ~0) over the payload */
	u32	irq_type;	/* IRQ_TYPE_INTX / IRQ_TYPE_MSI / IRQ_TYPE_MSIX */
	u32	irq_number;	/* 1-based vector number to raise */
	u32	flags;		/* FLAG_USE_DMA */
} __packed;
78
/* Configuration-space header template written into the EPC at bind time. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
85
/* Default size, in bytes, of the space backing BAR0..BAR5. */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
87
88static void pci_epf_test_dma_callback(void *param)
89{
90 struct pci_epf_test *epf_test = param;
91 struct dma_tx_state state;
92
93 epf_test->transfer_status =
94 dmaengine_tx_status(epf_test->transfer_chan,
95 epf_test->transfer_cookie, &state);
96 if (epf_test->transfer_status == DMA_COMPLETE ||
97 epf_test->transfer_status == DMA_ERROR)
98 complete(&epf_test->transfer_complete);
99}
100
/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 * data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/* TX channel moves memory to the device, RX the other way. */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	/* For slave transfers only the local end is handed to the channel. */
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/* Private (slave) channel: program the remote bus address. */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		/* Generic memcpy channel: both addresses go in directly. */
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/* Arm the completion and callback before submitting. */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Always quiesce the channel, including on the success path. */
	dmaengine_terminate_sync(chan);

	return ret;
}
191
/* Criteria used by epf_dma_filter_fn() to pick an EPC-private channel. */
struct epf_dma_filter {
	struct device *dev;	/* DMA controller device the channel must belong to */
	u32 dma_mask;		/* required direction bit, e.g. BIT(DMA_DEV_TO_MEM) */
};
196
197static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
198{
199 struct epf_dma_filter *filter = node;
200 struct dma_slave_caps caps;
201
202 memset(&caps, 0, sizeof(caps));
203 dma_get_slave_caps(chan, &caps);
204
205 return chan->device->dev == filter->dev
206 && (filter->dma_mask & caps.directions);
207}
208
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.  Tries to acquire a pair of
 * EPC-private slave channels (RX then TX); if either is unavailable, falls
 * back to a single generic memcpy channel shared for both directions.
 *
 * Return: 0 on success, negative errno when no channel could be obtained.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* Look for slave channels belonging to the EPC's DMA controller. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/* TX channel missing: release the RX channel already acquired. */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_tx = NULL;

fail_back_tx:
	/* Generic fallback: one memcpy-capable channel for both directions. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
273
274/**
275 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
276 * @epf_test: the EPF test device that performs data transfer operation
277 *
278 * Helper to cleanup EPF test DMA channel.
279 */
280static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
281{
282 if (!epf_test->dma_supported)
283 return;
284
285 dma_release_channel(epf_test->dma_chan_tx);
286 if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
287 epf_test->dma_chan_tx = NULL;
288 epf_test->dma_chan_rx = NULL;
289 return;
290 }
291
292 dma_release_channel(epf_test->dma_chan_rx);
293 epf_test->dma_chan_rx = NULL;
294
295 return;
296}
297
298static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
299 const char *op, u64 size,
300 struct timespec64 *start,
301 struct timespec64 *end, bool dma)
302{
303 struct timespec64 ts = timespec64_sub(*end, *start);
304 u64 rate = 0, ns;
305
306 /* calculate the rate */
307 ns = timespec64_to_ns(&ts);
308 if (ns)
309 rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
310
311 dev_info(&epf_test->epf->dev,
312 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
313 op, size, dma ? "YES" : "NO",
314 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
315}
316
/*
 * pci_epf_test_copy() - handle COMMAND_COPY: copy reg->size bytes of host
 * memory from reg->src_addr to reg->dst_addr
 * @epf_test: the EPF test device performing the copy
 * @reg: register block shared with the host (parameters in, status out)
 *
 * Both host regions are reached through EPC outbound windows.  The result
 * is reported in reg->status: STATUS_COPY_SUCCESS/STATUS_COPY_FAIL, with
 * an *_ADDR_INVALID flag when a window could not be set up.
 */
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	if (reg->flags & FLAG_USE_DMA) {
		/* Private slave channels cannot do MEM_TO_MEM copies. */
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* CPU copy: bounce through RAM with the __iomem accessors. */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}
410
/*
 * pci_epf_test_read() - handle COMMAND_READ: read reg->size bytes of host
 * memory from reg->src_addr and verify them against the CRC32 in
 * reg->checksum
 * @epf_test: the EPF test device performing the read
 * @reg: register block shared with the host (parameters in, status out)
 *
 * Uses DMA (DEV_TO_MEM) when FLAG_USE_DMA is set, a CPU memcpy_fromio()
 * otherwise.  The result is reported in reg->status.
 */
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	if (reg->flags & FLAG_USE_DMA) {
		/* DMA into the bounce buffer; map it for the controller. */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/* Verify the payload against the checksum provided by the host. */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}
495
/*
 * pci_epf_test_write() - handle COMMAND_WRITE: write reg->size random
 * bytes to host memory at reg->dst_addr and record their CRC32 in
 * reg->checksum for the host to verify
 * @epf_test: the EPF test device performing the write
 * @reg: register block shared with the host (parameters in, status out)
 *
 * Uses DMA (MEM_TO_DEV) when FLAG_USE_DMA is set, a CPU memcpy_toio()
 * otherwise.  The result is reported in reg->status.
 */
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Random payload; the host re-computes this checksum to verify. */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	if (reg->flags & FLAG_USE_DMA) {
		/* DMA out of the bounce buffer; map it for the controller. */
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/*
	 * wait 1ms in order for the write to complete. Without this delay
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}
586
/*
 * pci_epf_test_raise_irq() - signal completion to the host using the IRQ
 * type and vector selected in the register block
 * @epf_test: the EPF test device raising the interrupt
 * @reg: register block shared with the host (irq_type/irq_number in,
 *       status out)
 *
 * MSI/MSI-X vector numbers are validated against the count configured in
 * the EPC; an out-of-range request is logged and dropped.
 */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = reg->status | STATUS_IRQ_RAISED;
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	WRITE_ONCE(reg->status, status);

	switch (reg->irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, reg->irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, reg->irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}
632
633static void pci_epf_test_cmd_handler(struct work_struct *work)
634{
635 u32 command;
636 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
637 cmd_handler.work);
638 struct pci_epf *epf = epf_test->epf;
639 struct device *dev = &epf->dev;
640 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
641 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
642
643 command = READ_ONCE(reg->command);
644 if (!command)
645 goto reset_handler;
646
647 WRITE_ONCE(reg->command, 0);
648 WRITE_ONCE(reg->status, 0);
649
650 if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
651 !epf_test->dma_supported) {
652 dev_err(dev, "Cannot transfer data using DMA\n");
653 goto reset_handler;
654 }
655
656 if (reg->irq_type > IRQ_TYPE_MSIX) {
657 dev_err(dev, "Failed to detect IRQ type\n");
658 goto reset_handler;
659 }
660
661 switch (command) {
662 case COMMAND_RAISE_INTX_IRQ:
663 case COMMAND_RAISE_MSI_IRQ:
664 case COMMAND_RAISE_MSIX_IRQ:
665 pci_epf_test_raise_irq(epf_test, reg);
666 break;
667 case COMMAND_WRITE:
668 pci_epf_test_write(epf_test, reg);
669 pci_epf_test_raise_irq(epf_test, reg);
670 break;
671 case COMMAND_READ:
672 pci_epf_test_read(epf_test, reg);
673 pci_epf_test_raise_irq(epf_test, reg);
674 break;
675 case COMMAND_COPY:
676 pci_epf_test_copy(epf_test, reg);
677 pci_epf_test_raise_irq(epf_test, reg);
678 break;
679 default:
680 dev_err(dev, "Invalid command 0x%x\n", command);
681 break;
682 }
683
684reset_handler:
685 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
686 msecs_to_jiffies(1));
687}
688
689static void pci_epf_test_unbind(struct pci_epf *epf)
690{
691 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
692 struct pci_epc *epc = epf->epc;
693 struct pci_epf_bar *epf_bar;
694 int bar;
695
696 cancel_delayed_work(&epf_test->cmd_handler);
697 pci_epf_test_clean_dma_chan(epf_test);
698 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
699 epf_bar = &epf->bar[bar];
700
701 if (epf_test->reg[bar]) {
702 pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
703 epf_bar);
704 pci_epf_free_space(epf, epf_test->reg[bar], bar,
705 PRIMARY_INTERFACE);
706 }
707 }
708}
709
710static int pci_epf_test_set_bar(struct pci_epf *epf)
711{
712 int bar, add;
713 int ret;
714 struct pci_epf_bar *epf_bar;
715 struct pci_epc *epc = epf->epc;
716 struct device *dev = &epf->dev;
717 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
718 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
719 const struct pci_epc_features *epc_features;
720
721 epc_features = epf_test->epc_features;
722
723 for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
724 epf_bar = &epf->bar[bar];
725 /*
726 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
727 * if the specific implementation required a 64-bit BAR,
728 * even if we only requested a 32-bit BAR.
729 */
730 add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
731
732 if (epc_features->bar[bar].type == BAR_RESERVED)
733 continue;
734
735 ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
736 epf_bar);
737 if (ret) {
738 pci_epf_free_space(epf, epf_test->reg[bar], bar,
739 PRIMARY_INTERFACE);
740 dev_err(dev, "Failed to set BAR%d\n", bar);
741 if (bar == test_reg_bar)
742 return ret;
743 }
744 }
745
746 return 0;
747}
748
749static int pci_epf_test_core_init(struct pci_epf *epf)
750{
751 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
752 struct pci_epf_header *header = epf->header;
753 const struct pci_epc_features *epc_features;
754 struct pci_epc *epc = epf->epc;
755 struct device *dev = &epf->dev;
756 bool msix_capable = false;
757 bool msi_capable = true;
758 int ret;
759
760 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
761 if (epc_features) {
762 msix_capable = epc_features->msix_capable;
763 msi_capable = epc_features->msi_capable;
764 }
765
766 if (epf->vfunc_no <= 1) {
767 ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
768 if (ret) {
769 dev_err(dev, "Configuration header write failed\n");
770 return ret;
771 }
772 }
773
774 ret = pci_epf_test_set_bar(epf);
775 if (ret)
776 return ret;
777
778 if (msi_capable) {
779 ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
780 epf->msi_interrupts);
781 if (ret) {
782 dev_err(dev, "MSI configuration failed\n");
783 return ret;
784 }
785 }
786
787 if (msix_capable) {
788 ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
789 epf->msix_interrupts,
790 epf_test->test_reg_bar,
791 epf_test->msix_table_offset);
792 if (ret) {
793 dev_err(dev, "MSI-X configuration failed\n");
794 return ret;
795 }
796 }
797
798 return 0;
799}
800
/*
 * pci_epf_test_link_up() - link-up event callback
 * @epf: endpoint function whose link came up
 *
 * Starts (or restarts) the command-handler work with a ~1 ms delay so the
 * driver begins polling the test register BAR for host commands.
 *
 * Return: always 0.
 */
static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}
810
/*
 * EPC event callbacks: core_init performs header/BAR/MSI(-X) setup,
 * link_up (re)starts the command-handler work.
 */
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.core_init = pci_epf_test_core_init,
	.link_up = pci_epf_test_link_up,
};
815
/*
 * pci_epf_test_alloc_space() - allocate backing memory for all BARs
 * @epf: endpoint function to allocate space for
 *
 * The test register BAR holds the pci_epf_test_reg block followed (when
 * the controller is MSI-X capable) by the MSI-X vector table and its
 * pending-bit array.  The remaining BARs get the fixed sizes from
 * bar_size[].
 *
 * Return: 0 on success (data-BAR allocation failures are non-fatal),
 * -ENOMEM if the test register BAR itself cannot be allocated.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	/* Register block rounded up to 128 bytes, so whatever follows it
	 * (the MSI-X table, if any) starts 128-byte aligned. */
	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		/* MSI-X table is placed right after the register block. */
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/* A 64-bit BAR occupies two BAR registers: skip the upper half. */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (epc_features->bar[bar].type == BAR_RESERVED)
			continue;

		/*
		 * A data-BAR allocation failure is logged but not fatal:
		 * epf_test->reg[bar] is left NULL and the function still
		 * returns success.
		 */
		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}
872
873static void pci_epf_configure_bar(struct pci_epf *epf,
874 const struct pci_epc_features *epc_features)
875{
876 struct pci_epf_bar *epf_bar;
877 int i;
878
879 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
880 epf_bar = &epf->bar[i];
881 if (epc_features->bar[i].only_64bit)
882 epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
883 }
884}
885
886static int pci_epf_test_bind(struct pci_epf *epf)
887{
888 int ret;
889 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
890 const struct pci_epc_features *epc_features;
891 enum pci_barno test_reg_bar = BAR_0;
892 struct pci_epc *epc = epf->epc;
893 bool linkup_notifier = false;
894 bool core_init_notifier = false;
895
896 if (WARN_ON_ONCE(!epc))
897 return -EINVAL;
898
899 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
900 if (!epc_features) {
901 dev_err(&epf->dev, "epc_features not implemented\n");
902 return -EOPNOTSUPP;
903 }
904
905 linkup_notifier = epc_features->linkup_notifier;
906 core_init_notifier = epc_features->core_init_notifier;
907 test_reg_bar = pci_epc_get_first_free_bar(epc_features);
908 if (test_reg_bar < 0)
909 return -EINVAL;
910 pci_epf_configure_bar(epf, epc_features);
911
912 epf_test->test_reg_bar = test_reg_bar;
913 epf_test->epc_features = epc_features;
914
915 ret = pci_epf_test_alloc_space(epf);
916 if (ret)
917 return ret;
918
919 if (!core_init_notifier) {
920 ret = pci_epf_test_core_init(epf);
921 if (ret)
922 return ret;
923 }
924
925 epf_test->dma_supported = true;
926
927 ret = pci_epf_test_init_dma_chan(epf_test);
928 if (ret)
929 epf_test->dma_supported = false;
930
931 if (!linkup_notifier && !core_init_notifier)
932 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
933
934 return 0;
935}
936
/* Device ID table: this driver binds to functions named "pci_epf_test". */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},	/* sentinel */
};
943
944static int pci_epf_test_probe(struct pci_epf *epf,
945 const struct pci_epf_device_id *id)
946{
947 struct pci_epf_test *epf_test;
948 struct device *dev = &epf->dev;
949
950 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
951 if (!epf_test)
952 return -ENOMEM;
953
954 epf->header = &test_header;
955 epf_test->epf = epf;
956
957 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
958
959 epf->event_ops = &pci_epf_test_event_ops;
960
961 epf_set_drvdata(epf, epf_test);
962 return 0;
963}
964
/* Endpoint function operations implemented by this driver. */
static const struct pci_epf_ops ops = {
	.unbind = pci_epf_test_unbind,
	.bind = pci_epf_test_bind,
};
969
/* EPF driver descriptor tying together the probe callback, ID table and ops. */
static struct pci_epf_driver test_driver = {
	.driver.name = "pci_epf_test",
	.probe = pci_epf_test_probe,
	.id_table = pci_epf_test_ids,
	.ops = &ops,
	.owner = THIS_MODULE,
};
977
978static int __init pci_epf_test_init(void)
979{
980 int ret;
981
982 kpcitest_workqueue = alloc_workqueue("kpcitest",
983 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
984 if (!kpcitest_workqueue) {
985 pr_err("Failed to allocate the kpcitest work queue\n");
986 return -ENOMEM;
987 }
988
989 ret = pci_epf_register_driver(&test_driver);
990 if (ret) {
991 destroy_workqueue(kpcitest_workqueue);
992 pr_err("Failed to register pci epf test driver --> %d\n", ret);
993 return ret;
994 }
995
996 return 0;
997}
998module_init(pci_epf_test_init);
999
1000static void __exit pci_epf_test_exit(void)
1001{
1002 if (kpcitest_workqueue)
1003 destroy_workqueue(kpcitest_workqueue);
1004 pci_epf_unregister_driver(&test_driver);
1005}
1006module_exit(pci_epf_test_exit);
1007
1008MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1009MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1010MODULE_LICENSE("GPL v2");