// SPDX-License-Identifier: GPL-2.0
/*
 * PCI EPF driver for MHI Endpoint devices
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

#define MHI_VERSION_1_0 0x01000000

#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)

/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)

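/*
 * Bookkeeping for one in-flight async DMA transfer. Completed transfers are
 * queued on the controller's dma_list by the dmaengine callback and reaped
 * by the dma_work worker in process context.
 */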
struct pci_epf_mhi_dma_transfer {
	struct pci_epf_mhi *epf_mhi;
	struct mhi_ep_buf_info buf_info;
	struct list_head node;
	dma_addr_t paddr;
	enum dma_data_direction dir;
	size_t size;
};

struct pci_epf_mhi_ep_info {
	const struct mhi_ep_cntrl_config *config;
	struct pci_epf_header *epf_header;
	enum pci_barno bar_num;
	u32 epf_flags;
	u32 msi_count;
	u32 mru;
	u32 flags;
};

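/*
 * Shorthand for the channel table below. Per the usual MHI convention, UL
 * (uplink) channels carry host-to-device traffic (DMA_TO_DEVICE) and DL
 * (downlink) channels carry device-to-host traffic (DMA_FROM_DEVICE).
 */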
#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)	\
	{							\
		.num = ch_num,					\
		.name = ch_name,				\
		.dir = direction,				\
	}

#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)

#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)

static const struct mhi_ep_channel_config mhi_v1_channels[] = {
	MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
	MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
	MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
	MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
	MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
	MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
	MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
	MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
};

static const struct mhi_ep_cntrl_config mhi_v1_config = {
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(mhi_v1_channels),
	.ch_cfg = mhi_v1_channels,
	.mhi_version = MHI_VERSION_1_0,
};

static struct pci_epf_header sdx55_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
	.subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sdx55_info = {
	.config = &mhi_v1_config,
	.epf_header = &sdx55_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
};

static struct pci_epf_header sm8450_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sm8450_info = {
	.config = &mhi_v1_config,
	.epf_header = &sm8450_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
	.flags = MHI_EPF_USE_DMA,
};

static struct pci_epf_header sa8775p_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sa8775p_info = {
	.config = &mhi_v1_config,
	.epf_header = &sa8775p_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
	.flags = MHI_EPF_USE_DMA,
};

struct pci_epf_mhi {
	const struct pci_epc_features *epc_features;
	const struct pci_epf_mhi_ep_info *info;
	struct mhi_ep_cntrl mhi_cntrl;
	struct pci_epf *epf;
	struct mutex lock;
	void __iomem *mmio;
	resource_size_t mmio_phys;
	struct dma_chan *dma_chan_tx;
	struct dma_chan *dma_chan_rx;
	struct workqueue_struct *dma_wq;
	struct work_struct dma_work;
	struct list_head dma_list;
	spinlock_t list_lock;
	u32 mmio_size;
	int irq;
};

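/*
 * Endpoint controllers typically require outbound (iATU) mappings to be
 * aligned to epc_features->align. Return the offset of @addr within its
 * alignment window so mappings can start on an aligned boundary.
 */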
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
	return addr & (epf_mhi->epc_features->align - 1);
}

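/*
 * Allocate a chunk of endpoint address space and map it to the host address
 * @pci_addr through the outbound window. @offset compensates for the
 * controller's alignment requirement: the mapping starts at the aligned
 * address below @pci_addr and the returned paddr/vaddr are advanced past it.
 */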
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t *paddr, void __iomem **vaddr,
				   size_t offset, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	*vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
	if (!*vaddr)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
			       pci_addr - offset, size + offset);
	if (ret) {
		pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
		return ret;
	}

	*paddr = *paddr + offset;
	*vaddr = *vaddr + offset;

	return 0;
}

static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				 phys_addr_t *paddr, void __iomem **vaddr,
				 size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
				       offset, size);
}

static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
				     u64 pci_addr, phys_addr_t paddr,
				     void __iomem *vaddr, size_t offset,
				     size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
	pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
			      size + offset);
}

static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t paddr, void __iomem *vaddr,
				   size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
				 size);
}

static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	/*
	 * MHI supplies 0-based MSI vectors but the API expects the vector
	 * number to start from 1, so we need to increment the vector by 1.
	 */
	pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI,
			  vector + 1);
}

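/*
 * Read from host memory over a temporary iATU mapping: map the host buffer,
 * memcpy_fromio() into the device buffer, then tear the mapping down.
 * epf_mhi->lock serializes users of the shared outbound window.
 */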
static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
				 struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
				      &tre_buf, offset, buf_info->size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
				 tre_buf, offset, buf_info->size);

	mutex_unlock(&epf_mhi->lock);

	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;
}

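/* Mirror of pci_epf_mhi_iatu_read() for the device-to-host direction. */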
static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
				      &tre_buf, offset, buf_info->size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
				 tre_buf, offset, buf_info->size);

	mutex_unlock(&epf_mhi->lock);

	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;
}

static void pci_epf_mhi_dma_callback(void *param)
{
	complete(param);
}

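/*
 * Synchronous read using the controller's embedded DMA (eDMA). Transfers
 * smaller than 4 KiB fall back to the iATU path, where the cost of setting
 * up a DMA transfer would likely outweigh the copy itself.
 */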
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
				 struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_rx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t dst_addr;
	int ret;

	if (buf_info->size < SZ_4K)
		return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_FROM_DEVICE);
	ret = dma_mapping_error(dma_dev, dst_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
					   DMA_DEV_TO_MEM,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	}

err_unmap:
	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

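/* Device-to-host counterpart of pci_epf_mhi_edma_read(), using the TX channel. */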
static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_tx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t src_addr;
	int ret;

	if (buf_info->size < SZ_4K)
		return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_TO_DEVICE);
	ret = dma_mapping_error(dma_dev, src_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
					   DMA_MEM_TO_DEV,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	}

err_unmap:
	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

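/*
 * Process-context worker: splice completed transfers off dma_list, unmap
 * their buffers and invoke the MHI completion callbacks.
 */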
static void pci_epf_mhi_dma_worker(struct work_struct *work)
{
	struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct pci_epf_mhi_dma_transfer *itr, *tmp;
	struct mhi_ep_buf_info *buf_info;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&epf_mhi->list_lock, flags);
	list_splice_tail_init(&epf_mhi->dma_list, &head);
	spin_unlock_irqrestore(&epf_mhi->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
		buf_info = &itr->buf_info;
		buf_info->cb(buf_info);
		kfree(itr);
	}
}

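/*
 * dmaengine completion callback: just queue the finished transfer so the
 * unmap and the MHI completion callback run in process context via the
 * worker above.
 */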
static void pci_epf_mhi_dma_async_callback(void *param)
{
	struct pci_epf_mhi_dma_transfer *transfer = param;
	struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;

	spin_lock(&epf_mhi->list_lock);
	list_add_tail(&transfer->node, &epf_mhi->dma_list);
	spin_unlock(&epf_mhi->list_lock);

	queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
}

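/*
 * Async eDMA read: the transfer is submitted and the function returns
 * immediately; completion is reported through buf_info->cb once the worker
 * has reaped the transfer. Note the success path exits through err_unlock.
 */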
static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
				       struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct pci_epf_mhi_dma_transfer *transfer = NULL;
	struct dma_chan *chan = epf_mhi->dma_chan_rx;
	struct device *dev = &epf_mhi->epf->dev;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t dst_addr;
	int ret;

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_FROM_DEVICE);
	ret = dma_mapping_error(dma_dev, dst_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
					   DMA_DEV_TO_MEM,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
	if (!transfer) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	transfer->epf_mhi = epf_mhi;
	transfer->paddr = dst_addr;
	transfer->size = buf_info->size;
	transfer->dir = DMA_FROM_DEVICE;
	memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));

	desc->callback = pci_epf_mhi_dma_async_callback;
	desc->callback_param = transfer;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_free_transfer;
	}

	dma_async_issue_pending(chan);

	/* Success: ret is 0 here, exit through the common unlock path */
	goto err_unlock;

err_free_transfer:
	kfree(transfer);
err_unmap:
	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

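/* Async device-to-host counterpart of pci_epf_mhi_edma_read_async(). */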
static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
					struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct pci_epf_mhi_dma_transfer *transfer = NULL;
	struct dma_chan *chan = epf_mhi->dma_chan_tx;
	struct device *dev = &epf_mhi->epf->dev;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t src_addr;
	int ret;

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_TO_DEVICE);
	ret = dma_mapping_error(dma_dev, src_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
					   DMA_MEM_TO_DEV,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
	if (!transfer) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	transfer->epf_mhi = epf_mhi;
	transfer->paddr = src_addr;
	transfer->size = buf_info->size;
	transfer->dir = DMA_TO_DEVICE;
	memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));

	desc->callback = pci_epf_mhi_dma_async_callback;
	desc->callback_param = transfer;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_free_transfer;
	}

	dma_async_issue_pending(chan);

	/* Success: ret is 0 here, exit through the common unlock path */
	goto err_unlock;

err_free_transfer:
	kfree(transfer);
err_unmap:
	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

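/*
 * dmaengine filter: pick channels that belong to the endpoint controller's
 * parent device and support the requested transfer direction.
 */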
struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev && filter->dma_mask &
					caps.directions;
}

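/*
 * Request one TX and one RX slave channel from the controller's eDMA and
 * set up the deferred-completion machinery used by the async transfer paths.
 */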
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct device *dev = &epf_mhi->epf->dev;
	struct epf_dma_filter filter;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	filter.dev = dma_dev;
	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
		dev_err(dev, "Failed to request tx channel\n");
		return -ENODEV;
	}

	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
	epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
		dev_err(dev, "Failed to request rx channel\n");
		ret = -ENODEV;
		goto err_release_tx;
	}

	epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
	if (!epf_mhi->dma_wq) {
		ret = -ENOMEM;
		goto err_release_rx;
	}

	INIT_LIST_HEAD(&epf_mhi->dma_list);
	INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
	spin_lock_init(&epf_mhi->list_lock);

	return 0;

err_release_rx:
	dma_release_channel(epf_mhi->dma_chan_rx);
	epf_mhi->dma_chan_rx = NULL;
err_release_tx:
	dma_release_channel(epf_mhi->dma_chan_tx);
	epf_mhi->dma_chan_tx = NULL;

	return ret;
}

static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
	destroy_workqueue(epf_mhi->dma_wq);
	dma_release_channel(epf_mhi->dma_chan_tx);
	dma_release_channel(epf_mhi->dma_chan_rx);
	epf_mhi->dma_chan_tx = NULL;
	epf_mhi->dma_chan_rx = NULL;
}

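/*
 * Core-init handler: program the BAR backing the MMIO region, the MSI
 * capability and the configuration space header, then bring up DMA on
 * platforms that use it.
 */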
static int pci_epf_mhi_epc_init(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	epf_bar->phys_addr = epf_mhi->mmio_phys;
	epf_bar->size = epf_mhi->mmio_size;
	epf_bar->barno = info->bar_num;
	epf_bar->flags = info->epf_flags;
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Failed to set BAR: %d\n", ret);
		return ret;
	}

	ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
			      order_base_2(info->msi_count));
	if (ret) {
		dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
		return ret;
	}

	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret) {
		dev_err(dev, "Failed to set Configuration header: %d\n", ret);
		return ret;
	}

	epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epf_mhi->epc_features)
		return -ENODATA;

	if (info->flags & MHI_EPF_USE_DMA) {
		ret = pci_epf_mhi_dma_init(epf_mhi);
		if (ret) {
			dev_err(dev, "Failed to initialize DMA: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static void pci_epf_mhi_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;

	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}

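/*
 * Link-up handler: register the MHI endpoint controller, wiring the transfer
 * ops to either the iATU or the eDMA implementations depending on the
 * platform flags.
 */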
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	mhi_cntrl->mmio = epf_mhi->mmio;
	mhi_cntrl->irq = epf_mhi->irq;
	mhi_cntrl->mru = info->mru;

	/* Assign the struct dev of PCI EP as MHI controller device */
	mhi_cntrl->cntrl_dev = epc->dev.parent;
	mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
	mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
	mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
	mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
	mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
	if (info->flags & MHI_EPF_USE_DMA) {
		mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
		mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
		mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
		mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
	}

	/* Register the MHI EP controller */
	ret = mhi_ep_register_controller(mhi_cntrl, info->config);
	if (ret) {
		dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		return ret;
	}

	return 0;
}

static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;

	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	return 0;
}

static int pci_epf_mhi_bus_master_enable(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct device *dev = &epf->dev;
	int ret;

	/*
	 * Power up the MHI EP stack if link is up and stack is in power down
	 * state.
	 */
	if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
		ret = mhi_ep_power_up(mhi_cntrl);
		if (ret) {
			dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
			if (info->flags & MHI_EPF_USE_DMA)
				pci_epf_mhi_dma_deinit(epf_mhi);
			mhi_ep_unregister_controller(mhi_cntrl);
		}
	}

	return 0;
}

static int pci_epf_mhi_bind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct platform_device *pdev = to_platform_device(epc->dev.parent);
	struct resource *res;
	int ret;

	/* Get MMIO base address from Endpoint controller */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
	if (!res) {
		dev_err(dev, "Failed to get \"mmio\" resource\n");
		return -ENODEV;
	}

	epf_mhi->mmio_phys = res->start;
	epf_mhi->mmio_size = resource_size(res);

	epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
	if (!epf_mhi->mmio)
		return -ENOMEM;

	ret = platform_get_irq_byname(pdev, "doorbell");
	if (ret < 0) {
		iounmap(epf_mhi->mmio);
		return ret;
	}

	epf_mhi->irq = ret;

	return 0;
}

static void pci_epf_mhi_unbind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;

	/*
	 * Forcefully power down the MHI EP stack. The only way to bring it
	 * back to a working state after a subsequent bind is a Bus Master
	 * Enable event from the host.
	 */
	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	iounmap(epf_mhi->mmio);
	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}

static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
	.epc_init = pci_epf_mhi_epc_init,
	.epc_deinit = pci_epf_mhi_epc_deinit,
	.link_up = pci_epf_mhi_link_up,
	.link_down = pci_epf_mhi_link_down,
	.bus_master_enable = pci_epf_mhi_bus_master_enable,
};

static int pci_epf_mhi_probe(struct pci_epf *epf,
			     const struct pci_epf_device_id *id)
{
	struct pci_epf_mhi_ep_info *info =
		(struct pci_epf_mhi_ep_info *)id->driver_data;
	struct pci_epf_mhi *epf_mhi;
	struct device *dev = &epf->dev;

	epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
	if (!epf_mhi)
		return -ENOMEM;

	epf->header = info->epf_header;
	epf_mhi->info = info;
	epf_mhi->epf = epf;

	epf->event_ops = &pci_epf_mhi_event_ops;

	mutex_init(&epf_mhi->lock);

	epf_set_drvdata(epf, epf_mhi);

	return 0;
}

static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
	{ .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info },
	{ .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
	{ .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
	{},
};

static const struct pci_epf_ops pci_epf_mhi_ops = {
	.unbind = pci_epf_mhi_unbind,
	.bind = pci_epf_mhi_bind,
};

static struct pci_epf_driver pci_epf_mhi_driver = {
	.driver.name = "pci_epf_mhi",
	.probe = pci_epf_mhi_probe,
	.id_table = pci_epf_mhi_ids,
	.ops = &pci_epf_mhi_ops,
	.owner = THIS_MODULE,
};

static int __init pci_epf_mhi_init(void)
{
	return pci_epf_register_driver(&pci_epf_mhi_driver);
}
module_init(pci_epf_mhi_init);

static void __exit pci_epf_mhi_exit(void)
{
	pci_epf_unregister_driver(&pci_epf_mhi_driver);
}
module_exit(pci_epf_mhi_exit);

MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");