// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
#include "scif_map.h"

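/*
 * Release an endpoint's queue pair resources: unmap the I/O mappings of
 * the outbound ring buffer and the peer's queue pair, and undo the DMA
 * mappings of the local queue pair and local ring buffer.
 */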
void scif_cleanup_ep_qp(struct scif_endpt *ep)
{
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp->outbound_q.rb_base) {
		scif_iounmap((void *)qp->outbound_q.rb_base,
			     qp->outbound_q.size, ep->remote_dev);
		qp->outbound_q.rb_base = NULL;
	}
	if (qp->remote_qp) {
		scif_iounmap((void *)qp->remote_qp,
			     sizeof(struct scif_qp), ep->remote_dev);
		qp->remote_qp = NULL;
	}
	if (qp->local_qp) {
		scif_unmap_single(qp->local_qp, ep->remote_dev,
				  sizeof(struct scif_qp));
		qp->local_qp = 0x0;
	}
	if (qp->local_buf) {
		scif_unmap_single(qp->local_buf, ep->remote_dev,
				  SCIF_ENDPT_QP_SIZE);
		qp->local_buf = 0;
	}
}

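/*
 * Tear down an endpoint: release the queue pair resources under the
 * endpoint lock, then free the locally allocated inbound ring buffer
 * and the queue pair itself.
 */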
void scif_teardown_ep(void *endpt)
{
	struct scif_endpt *ep = endpt;
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp) {
		spin_lock(&ep->lock);
		scif_cleanup_ep_qp(ep);
		spin_unlock(&ep->lock);
		kfree(qp->inbound_q.rb_base);
		kfree(qp);
	}
}

/*
 * Enqueue the endpoint to the zombie list for cleanup.
 * The endpoint should not be accessed once this API returns.
 */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
	if (!eplock_held)
		mutex_lock(&scif_info.eplock);
	spin_lock(&ep->lock);
	ep->state = SCIFEP_ZOMBIE;
	spin_unlock(&ep->lock);
	list_add_tail(&ep->list, &scif_info.zombie);
	scif_info.nr_zombies++;
	if (!eplock_held)
		mutex_unlock(&scif_info.eplock);
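	/* Kick the misc work handler so it can free up zombie endpoints */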
	schedule_work(&scif_info.misc_work);
}

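/*
 * Return the endpoint listening on the given local port, or NULL if no
 * such endpoint exists. The listen list is traversed under eplock.
 */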
static struct scif_endpt *scif_find_listen_ep(u16 port)
{
	struct scif_endpt *ep = NULL;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.listen) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->port.port == port) {
			mutex_unlock(&scif_info.eplock);
			return ep;
		}
	}
	mutex_unlock(&scif_info.eplock);
	return NULL;
}

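/*
 * Walk the zombie list and free every endpoint whose RMA state no longer
 * holds references, releasing its IOVA domain along the way.
 */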
void scif_cleanup_zombie_epd(void)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;

	mutex_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.zombie) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (scif_rma_ep_can_uninit(ep)) {
			list_del(pos);
			scif_info.nr_zombies--;
			put_iova_domain(&ep->rma_info.iovad);
			kfree(ep);
		}
	}
	mutex_unlock(&scif_info.eplock);
}

/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node. This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end point's queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_conreq *conreq;

	conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
	if (!conreq)
		/* Lack of resources so reject the request. */
		goto conreq_sendrej;

	ep = scif_find_listen_ep(msg->dst.port);
	if (!ep)
		/* Send reject due to no listening ports */
		goto conreq_sendrej_free;
	else
		spin_lock(&ep->lock);

	if (ep->backlog <= ep->conreqcnt) {
		/* Send reject due to too many pending requests */
		spin_unlock(&ep->lock);
		goto conreq_sendrej_free;
	}

	conreq->msg = *msg;
	list_add_tail(&conreq->list, &ep->conlist);
	ep->conreqcnt++;
	wake_up_interruptible(&ep->conwq);
	spin_unlock(&ep->lock);
	return;

conreq_sendrej_free:
	kfree(conreq);
conreq_sendrej:
	msg->uop = SCIF_CNCT_REJ;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * An accept() on the remote node has occurred and sent this message
 * to indicate success. Place the end point in the MAPPING state and
 * save the remote node's memory information. Then wake up the connect
 * request so it can finish.
 */
void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTING) {
		ep->peer.node = msg->src.node;
		ep->peer.port = msg->src.port;
		ep->qp_info.gnt_pld = msg->payload[1];
		ep->remote_ep = msg->payload[2];
		ep->state = SCIFEP_MAPPING;

		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The remote connection request has finished mapping the local memory.
 * Place the connection in the connected state and wake up the pending
 * accept() call.
 */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	mutex_lock(&scif_info.connlock);
	spin_lock(&ep->lock);
	/* New ep is now connected with all resources set. */
	ep->state = SCIFEP_CONNECTED;
	list_add_tail(&ep->list, &scif_info.connected);
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
}

/**
 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The remote connection request failed to map the local memory it was sent.
 * Place the end point in the CLOSING state to indicate the failure and wake
 * up the pending accept().
 */
void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_CLOSING;
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The remote end has rejected the connection request. Set the end
 * point back to the bound state and wake up the pending connect().
 */
void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTING) {
		ep->state = SCIFEP_BOUND;
		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The remote node has indicated close() has been called on its end
 * point. Remove the local end point from the connected list, set its
 * state to disconnected and ensure accesses to the remote node are
 * shut down.
 *
 * When all accesses to the remote end have completed, send a
 * DISCNT_ACK to indicate it can remove its resources and complete
 * the close routine.
 */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		/*
		 * The local ep may have sent a disconnect and been closed
		 * due to a message response time out. It may have been
		 * allocated again and formed a new connection so we want to
		 * check if the remote ep matches.
		 */
		if (((u64)tmpep == msg->payload[1]) &&
		    ((u64)tmpep->remote_ep == msg->payload[0])) {
			list_del(pos);
			ep = tmpep;
			spin_lock(&ep->lock);
			break;
		}
	}

	/*
	 * If the terminated end is not found then this side started closing
	 * before the other side sent the disconnect. If so the ep will no
	 * longer be on the connected list. Regardless, the other side
	 * needs to be acked to let it know close is complete.
	 */
	if (!ep) {
		mutex_unlock(&scif_info.connlock);
		goto discnct_ack;
	}

	ep->state = SCIFEP_DISCONNECTED;
	list_add_tail(&ep->list, &scif_info.disconnected);

	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);

discnct_ack:
	msg->uop = SCIF_DISCNT_ACK;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * Remote side has indicated it has no more references to local resources.
 */
void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_DISCONNECTED;
	spin_unlock(&ep->lock);
	complete(&ep->discon);
}

/**
 * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The remote side has sent data on the connection. Wake up any threads
 * blocked in recv on this end point.
 */
void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED)
		wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The remote side has received data from the connection. Wake up any
 * threads blocked in send on this end point.
 */
void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED)
		wake_up_interruptible(&ep->sendwq);
	spin_unlock(&ep->lock);
}