/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */

#ifndef __T7XX_HIF_CLDMA_H__
#define __T7XX_HIF_CLDMA_H__

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/types.h>

#include "t7xx_cldma.h"
#include "t7xx_pci.h"

/**
 * enum cldma_id - Identifiers for CLDMA HW units.
 * @CLDMA_ID_MD: Modem control channel.
 * @CLDMA_ID_AP: Application Processor control channel.
 * @CLDMA_NUM: Number of CLDMA HW units available.
 */
enum cldma_id {
	CLDMA_ID_MD,
	CLDMA_ID_AP,
	CLDMA_NUM
};

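/*
 * The CLDMA HW consumes GPDs as little-endian structures, with 64-bit DMA
 * addresses split into 32-bit high/low halves.  An illustrative sketch of
 * chaining one GPD to the next (the helper name is hypothetical; the
 * driver's .c file carries its own version of this logic):
 *
 *	static void cldma_gpd_set_next(struct cldma_gpd *gpd, dma_addr_t next)
 *	{
 *		gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next));
 *		gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next));
 *	}
 */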
struct cldma_gpd {
	u8 flags;
	u8 not_used1;
	__le16 rx_data_allow_len;
	__le32 next_gpd_ptr_h;
	__le32 next_gpd_ptr_l;
	__le32 data_buff_bd_ptr_h;
	__le32 data_buff_bd_ptr_l;
	__le16 data_buff_len;
	__le16 not_used2;
};

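/*
 * Each cldma_request pairs one GPD (CPU view in @gpd, device view in
 * @gpd_addr) with the skb whose data buffer that GPD describes; requests
 * are linked through @entry into the circular @gpd_ring list of a
 * cldma_ring.  A sketch of stepping to the next request with wrap-around
 * (helper name hypothetical; the driver keeps its own ring walkers):
 *
 *	static struct cldma_request *cldma_ring_next(struct cldma_ring *ring,
 *						     struct cldma_request *req)
 *	{
 *		return list_next_entry_circular(req, &ring->gpd_ring, entry);
 *	}
 */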
struct cldma_request {
	struct cldma_gpd *gpd;		/* Virtual address for CPU */
	dma_addr_t gpd_addr;		/* Physical address for DMA */
	struct sk_buff *skb;
	dma_addr_t mapped_buff;
	struct list_head entry;
};

struct cldma_ring {
	struct list_head gpd_ring;	/* Ring of struct cldma_request */
	unsigned int length;		/* Number of struct cldma_request */
	int pkt_size;
};

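/*
 * One cldma_queue drives a single HW queue in one direction: it owns a
 * cldma_ring, serializes ring updates with @ring_lock and defers completion
 * handling to @cldma_work running on its dedicated @worker workqueue.
 * Per the field comment, @req_wq is used on the TX path only, presumably
 * for senders waiting until descriptors become available again.
 */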
struct cldma_queue {
	struct cldma_ctrl *md_ctrl;
	enum mtk_txrx dir;
	unsigned int index;
	struct cldma_ring *tr_ring;
	struct cldma_request *tr_done;
	struct cldma_request *rx_refill;
	struct cldma_request *tx_next;
	int budget;			/* Same as ring buffer size by default */
	spinlock_t ring_lock;
	wait_queue_head_t req_wq;	/* Only for TX */
	struct workqueue_struct *worker;
	struct work_struct cldma_work;
};

struct cldma_ctrl {
	enum cldma_id hif_id;
	struct device *dev;
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_queue txq[CLDMA_TXQ_NUM];
	struct cldma_queue rxq[CLDMA_RXQ_NUM];
	unsigned short txq_active;
	unsigned short rxq_active;
	unsigned short txq_started;
	spinlock_t cldma_lock;		/* Protects CLDMA structure */
	/* Assumes T/R GPD/BD/SPD have the same size */
	struct dma_pool *gpd_dmapool;
	struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
	struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
	struct md_pm_entity *pm_entity;
	struct t7xx_cldma_hw hw_info;
	bool is_late_init;
	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};

#define GPD_FLAGS_HWO		BIT(0)	/* GPD is owned by the HW */
#define GPD_FLAGS_IOC		BIT(7)	/* Interrupt on completion */
#define GPD_DMAPOOL_ALIGN	16

#define CLDMA_MTU		3584	/* 3.5 KiB */

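/*
 * Typical call sequence implied by the API below; an illustrative sketch
 * only, since the real ordering and error handling live in the rest of the
 * t7xx driver, and example_recv_skb(), qno and skb are hypothetical
 * placeholders:
 *
 *	t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);	(allocate a cldma_ctrl)
 *	t7xx_cldma_init(md_ctrl);			(SW setup)
 *	t7xx_cldma_set_recv_skb(md_ctrl, example_recv_skb);
 *	t7xx_cldma_hif_hw_init(md_ctrl);		(program the CLDMA HW)
 *	t7xx_cldma_start(md_ctrl);
 *	t7xx_cldma_send_skb(md_ctrl, qno, skb);		(transmit one skb on queue qno)
 *	...
 *	t7xx_cldma_stop(md_ctrl);
 *	t7xx_cldma_exit(md_ctrl);
 */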
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);

#endif /* __T7XX_HIF_CLDMA_H__ */