1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Intel MIC Platform Software Stack (MPSS)
4 *
5 * Copyright(c) 2014 Intel Corporation.
6 *
7 * Intel SCIF driver.
8 */
9#include <linux/module.h>
10#include <linux/idr.h>
11
12#include <linux/mic_common.h>
13#include "../common/mic_dev.h"
14#include "../bus/scif_bus.h"
15#include "scif_peer_bus.h"
16#include "scif_main.h"
17#include "scif_map.h"
18
/* Global SCIF driver state; embeds the "scif" misc character device. */
struct scif_info scif_info = {
	.mdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "scif",
		.fops = &scif_fops,
	}
};

/* Per-node device table, allocated in scif_setup_scifdev(). */
struct scif_dev *scif_dev;
/* Slab cache for unaligned-DMA bounce buffers, created in _scif_init(). */
struct kmem_cache *unaligned_cache;
/* Count of probed SCIF hw devices; the first probe also sets up loopback. */
static atomic_t g_loopb_cnt;
30
31/* Runs in the context of intr_wq */
32static void scif_intr_bh_handler(struct work_struct *work)
33{
34 struct scif_dev *scifdev =
35 container_of(work, struct scif_dev, intr_bh);
36
37 if (scifdev_self(scifdev))
38 scif_loopb_msg_handler(scifdev, scifdev->qpairs);
39 else
40 scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
41}
42
43int scif_setup_intr_wq(struct scif_dev *scifdev)
44{
45 if (!scifdev->intr_wq) {
46 snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
47 "SCIF INTR %d", scifdev->node);
48 scifdev->intr_wq =
49 alloc_ordered_workqueue(scifdev->intr_wqname, 0);
50 if (!scifdev->intr_wq)
51 return -ENOMEM;
52 INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
53 }
54 return 0;
55}
56
57void scif_destroy_intr_wq(struct scif_dev *scifdev)
58{
59 if (scifdev->intr_wq) {
60 destroy_workqueue(scifdev->intr_wq);
61 scifdev->intr_wq = NULL;
62 }
63}
64
65irqreturn_t scif_intr_handler(int irq, void *data)
66{
67 struct scif_dev *scifdev = data;
68 struct scif_hw_dev *sdev = scifdev->sdev;
69
70 sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
71 queue_work(scifdev->intr_wq, &scifdev->intr_bh);
72 return IRQ_HANDLED;
73}
74
75static void scif_qp_setup_handler(struct work_struct *work)
76{
77 struct scif_dev *scifdev = container_of(work, struct scif_dev,
78 qp_dwork.work);
79 struct scif_hw_dev *sdev = scifdev->sdev;
80 dma_addr_t da = 0;
81 int err;
82
83 if (scif_is_mgmt_node()) {
84 struct mic_bootparam *bp = sdev->dp;
85
86 da = bp->scif_card_dma_addr;
87 scifdev->rdb = bp->h2c_scif_db;
88 } else {
89 struct mic_bootparam __iomem *bp = sdev->rdp;
90
91 da = readq(&bp->scif_host_dma_addr);
92 scifdev->rdb = ioread8(&bp->c2h_scif_db);
93 }
94 if (da) {
95 err = scif_qp_response(da, scifdev);
96 if (err)
97 dev_err(&scifdev->sdev->dev,
98 "scif_qp_response err %d\n", err);
99 } else {
100 schedule_delayed_work(&scifdev->qp_dwork,
101 msecs_to_jiffies(1000));
102 }
103}
104
105static int scif_setup_scifdev(void)
106{
107 /* We support a maximum of 129 SCIF nodes including the mgmt node */
108#define MAX_SCIF_NODES 129
109 int i;
110 u8 num_nodes = MAX_SCIF_NODES;
111
112 scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
113 if (!scif_dev)
114 return -ENOMEM;
115 for (i = 0; i < num_nodes; i++) {
116 struct scif_dev *scifdev = &scif_dev[i];
117
118 scifdev->node = i;
119 scifdev->exit = OP_IDLE;
120 init_waitqueue_head(&scifdev->disconn_wq);
121 mutex_init(&scifdev->lock);
122 INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
123 INIT_DELAYED_WORK(&scifdev->p2p_dwork,
124 scif_poll_qp_state);
125 INIT_DELAYED_WORK(&scifdev->qp_dwork,
126 scif_qp_setup_handler);
127 INIT_LIST_HEAD(&scifdev->p2p);
128 RCU_INIT_POINTER(scifdev->spdev, NULL);
129 }
130 return 0;
131}
132
/* Free the per-node device table allocated by scif_setup_scifdev(). */
static void scif_destroy_scifdev(void)
{
	kfree(scif_dev);
	scif_dev = NULL;
}
138
/*
 * Driver probe for a SCIF hardware device.
 *
 * The first probe (g_loopb_cnt transitions 0 -> 1) also sets up the
 * loopback queue pair for the local (self) node. The per-peer interrupt
 * workqueue and queue pair are then created, a doorbell is claimed, and
 * the local QP DMA address/doorbell are published to the peer through
 * the bootparam page; qp_dwork then polls for the peer's response.
 * Errors unwind in reverse acquisition order via the goto ladder.
 */
static int scif_probe(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];
	int rc;

	dev_set_drvdata(&sdev->dev, sdev);
	scifdev->sdev = sdev;

	/* First device probed: also bring up the loopback QP. */
	if (1 == atomic_add_return(1, &g_loopb_cnt)) {
		struct scif_dev *loopb_dev = &scif_dev[sdev->snode];

		loopb_dev->sdev = sdev;
		rc = scif_setup_loopback_qp(loopb_dev);
		if (rc)
			goto exit;
	}

	rc = scif_setup_intr_wq(scifdev);
	if (rc)
		goto destroy_loopb;
	rc = scif_setup_qp(scifdev);
	if (rc)
		goto destroy_intr;
	scifdev->db = sdev->hw_ops->next_db(sdev);
	scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
						    "SCIF_INTR", scifdev,
						    scifdev->db);
	if (IS_ERR(scifdev->cookie)) {
		rc = PTR_ERR(scifdev->cookie);
		goto free_qp;
	}
	/* Publish our doorbell and QP DMA address to the peer. */
	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = scifdev->db;
		bp->scif_host_dma_addr = scifdev->qp_dma_addr;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(scifdev->db, &bp->h2c_scif_db);
		writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
	}
	/* Poll until the peer responds with its QP address. */
	schedule_delayed_work(&scifdev->qp_dwork,
			      msecs_to_jiffies(1000));
	return rc;
free_qp:
	scif_free_qp(scifdev);
destroy_intr:
	scif_destroy_intr_wq(scifdev);
destroy_loopb:
	/* Only the last user tears the loopback QP down. */
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
exit:
	return rc;
}
194
195void scif_stop(struct scif_dev *scifdev)
196{
197 struct scif_dev *dev;
198 int i;
199
200 for (i = scif_info.maxid; i >= 0; i--) {
201 dev = &scif_dev[i];
202 if (scifdev_self(dev))
203 continue;
204 scif_handle_remove_node(i);
205 }
206}
207
/*
 * Driver remove for a SCIF hardware device: the mirror of scif_probe().
 *
 * First invalidates the doorbell/DMA address published in the bootparam
 * page so the peer stops using them, then disconnects the node (host) or
 * stops all nodes (card), releases the loopback QP when the last device
 * goes away, frees the IRQ, and finally tears down the workqueue, the
 * pending QP-setup work and the queue pair itself.
 */
static void scif_remove(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];

	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = -1;
		bp->scif_host_dma_addr = 0x0;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(-1, &bp->h2c_scif_db);
		writeq(0x0, &bp->scif_card_dma_addr);
	}
	if (scif_is_mgmt_node()) {
		scif_disconnect_node(scifdev->node, true);
	} else {
		scif_info.card_initiated_exit = true;
		scif_stop(scifdev);
	}
	/* Last device gone: release the loopback QP as well. */
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
	if (scifdev->cookie) {
		sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
		scifdev->cookie = NULL;
	}
	scif_destroy_intr_wq(scifdev);
	cancel_delayed_work(&scifdev->qp_dwork);
	scif_free_qp(scifdev);
	scifdev->rdb = -1;
	scifdev->sdev = NULL;
}
241
/* Devices on the SCIF bus matched by this driver; zero entry terminates. */
static struct scif_hw_dev_id id_table[] = {
	{ MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
	{ 0 },
};
246
/* SCIF bus driver registration: binds probe/remove to matching devices. */
static struct scif_driver scif_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = scif_probe,
	.remove = scif_remove,
};
254
255static int _scif_init(void)
256{
257 int rc;
258
259 mutex_init(&scif_info.eplock);
260 spin_lock_init(&scif_info.rmalock);
261 spin_lock_init(&scif_info.nb_connect_lock);
262 spin_lock_init(&scif_info.port_lock);
263 mutex_init(&scif_info.conflock);
264 mutex_init(&scif_info.connlock);
265 mutex_init(&scif_info.fencelock);
266 INIT_LIST_HEAD(&scif_info.uaccept);
267 INIT_LIST_HEAD(&scif_info.listen);
268 INIT_LIST_HEAD(&scif_info.zombie);
269 INIT_LIST_HEAD(&scif_info.connected);
270 INIT_LIST_HEAD(&scif_info.disconnected);
271 INIT_LIST_HEAD(&scif_info.rma);
272 INIT_LIST_HEAD(&scif_info.rma_tc);
273 INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
274 INIT_LIST_HEAD(&scif_info.fence);
275 INIT_LIST_HEAD(&scif_info.nb_connect_list);
276 init_waitqueue_head(&scif_info.exitwq);
277 scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
278 scif_info.en_msg_log = 0;
279 scif_info.p2p_enable = 1;
280 rc = scif_setup_scifdev();
281 if (rc)
282 goto error;
283 unaligned_cache = kmem_cache_create("Unaligned_DMA",
284 SCIF_KMEM_UNALIGNED_BUF_SIZE,
285 0, SLAB_HWCACHE_ALIGN, NULL);
286 if (!unaligned_cache) {
287 rc = -ENOMEM;
288 goto free_sdev;
289 }
290 INIT_WORK(&scif_info.misc_work, scif_misc_handler);
291 INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
292 INIT_WORK(&scif_info.conn_work, scif_conn_handler);
293 idr_init(&scif_ports);
294 return 0;
295free_sdev:
296 scif_destroy_scifdev();
297error:
298 return rc;
299}
300
/* Release global state created by _scif_init(), in reverse order. */
static void _scif_exit(void)
{
	idr_destroy(&scif_ports);
	kmem_cache_destroy(unaligned_cache);
	scif_destroy_scifdev();
}
307
308static int __init scif_init(void)
309{
310 struct miscdevice *mdev = &scif_info.mdev;
311 int rc;
312
313 _scif_init();
314 iova_cache_get();
315 rc = scif_peer_bus_init();
316 if (rc)
317 goto exit;
318 rc = scif_register_driver(&scif_driver);
319 if (rc)
320 goto peer_bus_exit;
321 rc = misc_register(mdev);
322 if (rc)
323 goto unreg_scif;
324 scif_init_debugfs();
325 return 0;
326unreg_scif:
327 scif_unregister_driver(&scif_driver);
328peer_bus_exit:
329 scif_peer_bus_exit();
330exit:
331 _scif_exit();
332 return rc;
333}
334
/* Module exit: tear everything down in reverse of scif_init(). */
static void __exit scif_exit(void)
{
	scif_exit_debugfs();
	misc_deregister(&scif_info.mdev);
	scif_unregister_driver(&scif_driver);
	scif_peer_bus_exit();
	iova_cache_put();
	_scif_exit();
}
344
module_init(scif_init);
module_exit(scif_exit);

/* Autoload on SCIF bus device match (see id_table above). */
MODULE_DEVICE_TABLE(scif, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) SCIF driver");
MODULE_LICENSE("GPL v2");