// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sysfs.h>
#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

struct cm_id_private;
struct cm_work;
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name = "cm",
	.add = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

struct cm_counter_attribute {
	struct ib_port_attribute attr;
	unsigned short group;
	unsigned short index;
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u32 port_num;
	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};

struct cm_device {
	struct kref kref;
	struct list_head list;
	spinlock_t mad_agent_lock;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	struct rdma_ah_attr ah_attr;
	u16 dlid_datapath;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	u32 sidr_slid;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};

static void cm_dev_release(struct kref *kref)
{
	struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
	u32 i;

	rdma_for_each_port(cm_dev->ib_device, i)
		kfree(cm_dev->port[i - 1]);

	kfree(cm_dev);
}

static void cm_device_put(struct cm_device *cm_dev)
{
	kref_put(&cm_dev->kref, cm_dev_release);
}

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	lockdep_assert_held(&cm_id_priv->lock);

	if (!cm_id_priv->av.port)
		return ERR_PTR(-EINVAL);

	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		m = ERR_PTR(-EINVAL);
		goto out;
	}

	ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
	if (IS_ERR(ah)) {
		m = ERR_CAST(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;

out:
	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	return m;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	struct cm_id_private *cm_id_priv = msg->context[0];

	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	cm_deref_id(cm_id_priv);
	ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *
cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg;

	lockdep_assert_held(&cm_id_priv->lock);

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return msg;
	cm_id_priv->msg = msg;
	return msg;
}

static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
{
	struct cm_id_private *cm_id_priv = msg->context[0];

	lockdep_assert_held(&cm_id_priv->lock);

	if (!WARN_ON(cm_id_priv->msg != msg))
		cm_id_priv->msg = NULL;

	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	cm_deref_id(cm_id_priv);
	ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
					struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		ib_free_send_mad(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void cm_free_response_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	ib_free_send_mad(msg);
}

static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
{
	struct cm_port *old_port = av->port;

	if (old_port == port)
		return;

	av->port = port;
	if (old_port)
		cm_device_put(old_port->cm_dev);
	if (port)
		kref_get(&port->cm_dev->kref);
}

static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			       struct rdma_ah_attr *ah_attr, struct cm_av *av)
{
	cm_set_av_port(av, port);
	av->pkey_index = wc->pkey_index;
	rdma_move_ah_attr(&av->ah_attr, ah_attr);
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	cm_set_av_port(av, port);
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* The SGID attribute can be NULL in the following
		 * cases:
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	cm_set_av_port(av, port);

	/*
	 * av->ah_attr might already have been initialized from a wc or
	 * during request processing, in which case it may hold a reference
	 * to the sgid_attr. So initialize a new ah_attr on the stack.
	 * If initialization fails, the old ah_attr is still used for
	 * sending any responses. If initialization succeeds, the new
	 * ah_attr overwrites the old one, so the right ah_attr can be
	 * used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
{
	cm_set_av_port(dest, src->port);
	cm_set_av_port(src, NULL);
	dest->pkey_index = src->pkey_index;
	rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
	dest->timeout = src->timeout;
}

static void cm_destroy_av(struct cm_av *av)
{
	rdma_destroy_ah_attr(&av->ah_attr);
	cm_set_av_port(av, NULL);
}

static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}
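
/*
 * Annotation (not upstream code): the lookup above undoes the XOR applied
 * in cm_alloc_id_priv(), i.e. for an xarray index "id":
 *
 *	local_id = (__force __be32)id ^ cm.random_id_operand;
 *	cm_local_id(local_id) == id
 *
 * so the randomized, wire-visible local_id always maps back to the
 * sequential xarray slot that stores the cm_id_private.
 */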

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
	}
	return NULL;
}

static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
				link = &(*link)->rb_left;
			else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
			      &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		 cm_id_priv, GFP_ATOMIC);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock; however, we need to ensure that the user callback is
	 * single threaded and receives events in temporal order. If there
	 * are already events being processed then thread new events onto a
	 * list, and the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes the incoming reference. Once
		 * queued on the work_list, a reference is held by the thread
		 * currently running cm_process_work(), so this reference is
		 * no longer needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}
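
/*
 * Annotation (not upstream code): work_count starts at -1 (see
 * cm_alloc_id_priv()), so for the first event atomic_inc_and_test() hits
 * zero, returns true, and the event is delivered inline.  A second event
 * arriving before the first handler returns bumps the counter to +1,
 * returns false, and is queued on work_list for the thread running
 * cm_process_work() to drain via atomic_add_negative(-1, ...).
 */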

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
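
/*
 * Worked example (annotation, not upstream code): for iba_time = 14 the
 * exact IBA value is 4.096us * 2^14 ~= 67ms, while this approximation
 * gives 1 << (14 - 8) = 64ms; for any iba_time <= 8 it bottoms out at
 * 1ms instead of going sub-millisecond.
 */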

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
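
/*
 * Worked example (annotation, not upstream code): with ca_ack_delay = 15
 * and packet_life_time = 14, ack_timeout starts at 15 and the other term
 * is within 50% (15 >= 14), so it rounds up to 16.  With ca_ack_delay = 4
 * the result stays 15.  The final min() clamps to 31, the largest value
 * that fits the 5-bit ack timeout field.
 */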

static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		goto retest;
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_send_dreq_locked(cm_id_priv, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do xa_erase below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related to cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_remove_remote(cm_id_priv);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}

	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	cm_destroy_av(&cm_id_priv->av);
	cm_destroy_av(&cm_id_priv->alt_av);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
{
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
	else
		cm_id_priv->id.service_id = service_id;

	return 0;
}

/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 * connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 * and service ID resolution requests. The service ID should be specified
 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 * assign a service ID to the caller.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
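
/*
 * Hypothetical usage sketch (annotation, not part of this file): a ULP
 * would typically pair ib_create_cm_id() with ib_cm_listen(), along the
 * lines of (returning non-zero from the handler destroys the id):
 *
 *	static int my_handler(struct ib_cm_id *id,
 *			      const struct ib_cm_event *ev)
 *	{
 *		return 0;
 *	}
 *
 *	cm_id = ib_create_cm_id(device, my_handler, ctx);
 *	if (!IS_ERR(cm_id) && ib_cm_listen(cm_id, cpu_to_be64(svc_id)))
 *		ib_destroy_cm_id(cm_id);
 *
 * my_handler, ctx and svc_id are placeholder names.
 */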

/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 * the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 * be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 * and service ID resolution requests. The service ID should be specified
 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 * assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id);
	if (err) {
		ib_destroy_cm_id(&cm_id_priv->id);
		return ERR_PTR(err);
	}

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listen ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid = 0, low_tid;

	lockdep_assert_held(&cm_id_priv->lock);

	low_tid = (u64)cm_id_priv->id.local_id;
	if (!cm_id_priv->av.port)
		return cpu_to_be64(low_tid);

	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	if (cm_id_priv->av.port->mad_agent)
		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	return cpu_to_be64(hi_tid | low_tid);
}
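
/*
 * Layout note (annotation, not upstream code): the transaction ID built
 * above is
 *
 *	tid = cpu_to_be64(((u64)mad_agent->hi_tid << 32) | local_id);
 *
 * the high 32 bits identify the sending MAD agent and the low 32 bits
 * carry the (already randomized) local communication ID, so a reply MAD
 * can be matched back to the originating cm_id.
 */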

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
				  __be64 tid, u32 attr_mod)
{
	cm_format_mad_hdr(hdr, attr_id, tid);
	hdr->attr_mod = cpu_to_be32(attr_mod);
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;
	__be16 lid;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			      cm_form_tid(cm_id_priv), param->ece.attr_mod);

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
				    htons(ntohl(sa_path_get_slid(
					    pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
				    htons(ntohl(sa_path_get_dlid(
					    pri_path)))));
	} else {
		if (param->primary_path_inbound) {
			lid = param->primary_path_inbound->ib.dlid;
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(lid));
		} else
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));

		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
					htons(ntohl(sa_path_get_slid(
						alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
					htons(ntohl(sa_path_get_dlid(
						alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}
	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_av av = {}, alt_av = {};
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		return ret;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &av);
	if (ret)
		return ret;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &alt_av);
		if (ret) {
			cm_destroy_av(&av);
			return ret;
		}
	}
	cm_id->service_id = param->service_id;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	cm_move_av_from_path(&cm_id_priv->av, &av);
	if (param->primary_path_outbound)
		cm_id_priv->av.dlid_datapath =
			be16_to_cpu(param->primary_path_outbound->ib.dlid);

	if (param->alternate_path)
		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out_unlock;
	}

	req_msg = (struct cm_req_msg *)msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	trace_icm_send_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;
out_free:
	cm_free_priv_msg(msg);
out_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
	IBA_SET(CM_REJ_REASON, rej_msg, reason);

	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	trace_icm_issue_rej(
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_response_msg(msg);

	return ret;
}

static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((cpu_to_be16(
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					       req_msg))));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path,
					struct ib_wc *wc)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path, wc->slid);
		sa_path_set_slid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(alt_path, lid);
	}
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path,
				     struct ib_wc *wc)
{
	primary_path->dgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
	primary_path->sgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
	primary_path->flow_label =
		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
	primary_path->traffic_class =
		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
	primary_path->reversible = 1;
	primary_path->pkey =
		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
		alt_path->sgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
		alt_path->flow_label = cpu_to_be32(
			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
		alt_path->hop_limit =
			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
		alt_path->traffic_class =
			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
		alt_path->reversible = 1;
		alt_path->pkey =
			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu =
			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id =
			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u32 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

/**
 * cm_opa_to_ib_sgid - Convert an OPA SGID to an IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them because the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
1762static void cm_opa_to_ib_sgid(struct cm_work *work,
1763 struct sa_path_rec *path)
1764{
1765 struct ib_device *dev = work->port->cm_dev->ib_device;
1766 u32 port_num = work->port->port_num;
1767
1768 if (rdma_cap_opa_ah(dev, port_num) &&
1769 (ib_is_opa_gid(&path->sgid))) {
1770 union ib_gid sgid;
1771
1772 if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1773 dev_warn(&dev->dev,
1774 "Error updating sgid in CM request\n");
1775 return;
1776 }
1777
1778 path->sgid = sgid;
1779 }
1780}
1781
1782static void cm_format_req_event(struct cm_work *work,
1783 struct cm_id_private *cm_id_priv,
1784 struct ib_cm_id *listen_id)
1785{
1786 struct cm_req_msg *req_msg;
1787 struct ib_cm_req_event_param *param;
1788
1789 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1790 param = &work->cm_event.param.req_rcvd;
1791 param->listen_id = listen_id;
1792 param->bth_pkey = cm_get_bth_pkey(work);
1793 param->port = cm_id_priv->av.port->port_num;
1794 param->primary_path = &work->path[0];
1795 cm_opa_to_ib_sgid(work, param->primary_path);
1796 if (cm_req_has_alt_path(req_msg)) {
1797 param->alternate_path = &work->path[1];
1798 cm_opa_to_ib_sgid(work, param->alternate_path);
1799 } else {
1800 param->alternate_path = NULL;
1801 }
1802 param->remote_ca_guid =
1803 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1804 param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1805 param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1806 param->qp_type = cm_req_get_qp_type(req_msg);
1807 param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
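	/*
	 * The REQ carries the sender's view of the connection: the remote
	 * initiator depth bounds our responder resources and vice versa,
	 * hence the cross-assignment below.
	 */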
	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	param->local_cm_response_timeout =
		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
	param->remote_cm_response_timeout =
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}

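/*
 * Deliver the current event to the consumer's handler, then drain any
 * events that were queued on this cm_id while the handler was running.
 * A non-zero handler return destroys the cm_id.
 */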
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		if (!work)
			return;

		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
			    private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason, void *ari,
			  u8 ari_length, const void *private_data,
			  u8 private_data_len, enum ib_cm_state state)
{
	lockdep_assert_held(&cm_id_priv->lock);

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	switch (state) {
	case IB_CM_REQ_RCVD:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
			CM_MSG_RESPONSE_OTHER);
		break;
	}

	IBA_SET(CM_REJ_REASON, rej_msg, reason);
	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
			    private_data_len);
}

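/*
 * A duplicate REQ usually means our response was lost or delayed: re-send
 * the MRA if we are still in MRA_REQ_SENT, or a stale-connection REJ if we
 * have already moved to TIMEWAIT. A REQ still being processed, and all
 * other states, simply drop the duplicate.
 */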
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(
		&work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		return;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
			      IB_CM_TIMEWAIT);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_response_msg(msg);
}

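/*
 * Match an incoming REQ against local state: hand off duplicates of a REQ
 * we are already handling, reject and tear down stale connections that
 * reuse the same remote QPN, and finally look up a listener for the
 * service ID. Returns the listener (referenced) or NULL.
 */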
static struct cm_id_private *cm_match_req(struct cm_work *work,
					  struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(
		cm_id_priv->id.device,
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
	if (!listen_cm_id_priv) {
		cm_remove_remote(cm_id_priv);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	spin_unlock_irq(&cm.lock);
	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE) {
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(ib_lid_be16(wc->slid)));
			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
		}

		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE)
			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
				wc->dlid_path_bits);
	}

	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(ib_lid_be16(wc->slid)));
			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
		}

		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE)
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				wc->dlid_path_bits);
	}
}

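/*
 * Handle a new incoming REQ: allocate a cm_id for the passive side, match
 * it against the listen table, build path records and address handles from
 * the wire data, and queue the REQ event for the listener's callback.
 */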
static int cm_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	const struct ib_global_route *grh;
	const struct ib_gid_attr *gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
	cm_id_priv->remote_qpn =
		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->initiator_depth =
		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	cm_id_priv->responder_resources =
		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;
	cm_id_priv->timewait_info =
		cm_create_timewait_info(cm_id_priv->id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
	cm_id_priv->timewait_info->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
	cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;

	/*
	 * Note that the ID pointer is not in the xarray at this point,
	 * so this set is only visible to the local thread.
	 */
	cm_id_priv->id.state = IB_CM_REQ_RCVD;

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		trace_icm_no_listener_err(&cm_id_priv->id);
		cm_id_priv->id.state = IB_CM_IDLE;
		ret = -EINVAL;
		goto destroy;
	}

	memset(&work->path[0], 0, sizeof(work->path[0]));
	if (cm_req_has_alt_path(req_msg))
		memset(&work->path[1], 0, sizeof(work->path[1]));
	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
	gid_attr = grh->sgid_attr;

	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
		work->path[0].rec_type =
			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
	} else {
		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
		cm_path_set_rec_type(
			work->port->cm_dev->ib_device, work->port->port_num,
			&work->path[0],
			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
					req_msg));
	}
	if (cm_req_has_alt_path(req_msg))
		work->path[1].rec_type = work->path[0].rec_type;
	cm_format_paths_from_req(req_msg, &work->path[0],
				 &work->path[1], work->mad_recv_wc->wc);
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
		sa_path_set_dmac(&work->path[0],
				 cm_id_priv->av.ah_attr.roce.dmac);
	work->path[0].hop_limit = grh->hop_limit;

	/* This destroy call is needed to pair with cm_init_av_for_response */
	cm_destroy_av(&cm_id_priv->av);
	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
	if (ret) {
		int err;

		err = rdma_query_gid(work->port->cm_dev->ib_device,
				     work->port->port_num, 0,
				     &work->path[0].sgid);
		if (err)
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       NULL, 0, NULL, 0);
		else
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid),
				       NULL, 0);
		goto rejected;
	}
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
		cm_id_priv->av.dlid_datapath =
			IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);

	if (cm_req_has_alt_path(req_msg)) {
		ret = cm_init_av_by_path(&work->path[1], NULL,
					 &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(&cm_id_priv->id,
				       IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid), NULL, 0);
			goto rejected;
		}
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);

	/* Now MAD handlers can see the new ID */
	spin_lock_irq(&cm_id_priv->lock);
	cm_finalize_id(cm_id_priv);

	/* Refcount belongs to the event, pairs with cm_process_work() */
	refcount_inc(&cm_id_priv->refcount);
	cm_queue_work_unlock(cm_id_priv, work);
	/*
	 * Since this ID was just created and was not made visible to other MAD
	 * handlers until the cm_finalize_id() above we know that the
	 * cm_process_work() will deliver the event and the listen_cm_id
	 * embedded in the event can be derefed here.
	 */
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}

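/*
 * Build a REP. For XRC target QPs the QPN travels in the EE context number
 * field and SRQ is always reported as set; all other QP types use the
 * normal QPN, SRQ, and end-to-end flow control fields.
 */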
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
			      param->ece.attr_mod);
	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
		param->responder_resources);
	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
		cm_id_priv->av.port->cm_dev->ack_delay);
	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
			param->initiator_depth);
		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
			param->flow_control);
		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
	} else {
		IBA_SET(CM_REP_SRQ, rep_msg, 1);
		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
	}

	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
			    param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
		ret = -EINVAL;
		goto out;
	}

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out;
	}

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	trace_icm_send_rep(cm_id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
	WARN_ONCE(param->qp_num & 0xFF000000,
		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
		  param->qp_num);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

out_free:
	cm_free_priv_msg(msg);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
			    private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		trace_icm_send_cm_rtu_err(cm_id);
		ret = -EINVAL;
		goto error;
	}

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto error;
	}

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	trace_icm_send_rtu(cm_id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
	param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
	param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
	if (!cm_id_priv)
		return;

	atomic_long_inc(
		&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_response_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;
	struct cm_id_private *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		trace_icm_remote_no_priv_err(
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		ret = -EINVAL;
		trace_icm_rep_unknown_err(
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
			cm_id_priv->id.state);
		spin_unlock_irq(&cm_id_priv->lock);
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id =
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
	cm_id_priv->timewait_info->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

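	/*
	 * Insert into the remote ID and remote QPN tables under cm.lock so
	 * that a duplicate REP, or a stale connection reusing the same QPN,
	 * is detected atomically with this REP.
	 */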
	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		trace_icm_insert_failed_err(
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		goto error;
	}
	/* Check for a stale connection. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		trace_icm_staleconn_err(
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));

		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}

		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	cm_id_priv->initiator_depth =
		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
	cm_id_priv->responder_resources =
		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
	cm_id_priv->target_ack_delay =
		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
	cm_id_priv->av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->alt_av.timeout - 1);

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
		be32_to_cpu(cm_id_priv->remote_qpn));

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
			    private_data_len);
}

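/*
 * Send a DREQ to start an orderly disconnect. Only legal while the
 * connection is ESTABLISHED; if the DREQ cannot be allocated or posted the
 * cm_id is moved straight into timewait so teardown still completes.
 */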
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		trace_icm_dreq_skipped(&cm_id_priv->id);
		return -EINVAL;
	}

	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->msg);

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		cm_enter_timewait(cm_id_priv);
		return PTR_ERR(msg);
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	trace_icm_send_dreq(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		cm_free_priv_msg(msg);
		return ret;
	}

	cm_id_priv->id.state = IB_CM_DREQ_SENT;
	return 0;
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
			    private_data_len);
}

static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		trace_icm_send_drep_err(&cm_id_priv->id);
		kfree(private_data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, private_data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	trace_icm_send_drep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	return 0;
}

int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	void *data;
	int ret;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

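/*
 * Reply with a DREP to a DREQ that does not match any local connection,
 * echoing the sender's communication IDs so the remote side stops
 * retrying.
 */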
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));

	trace_icm_issue_drep(
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_response_msg(msg);

	return ret;
}

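/*
 * Handle an incoming DREQ: cancel any MAD we are still retrying for this
 * connection, re-send the DREP for duplicates arriving in TIMEWAIT, and
 * move valid requests to DREQ_RCVD so the ULP can answer with a DREP.
 */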
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		trace_icm_no_priv_err(
			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
		return -EINVAL;
	}

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn !=
	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_DREQ_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_response_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_DREQ_COUNTER]);
		goto unlock;
	default:
		trace_icm_dreq_unknown_err(&cm_id_priv->id);
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

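/*
 * Send a REJ appropriate to the current state: REQ-side states are reset
 * to idle, while REP-side states must pass through timewait before the
 * REJ goes out, since the remote QP may already be connected.
 */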
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	enum ib_cm_state state = cm_id_priv->id.state;
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	trace_icm_send_rej(&cm_id_priv->id, reason);

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_reset_to_idle(cm_id_priv);
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_enter_timewait(cm_id_priv);
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	default:
		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
		return -EINVAL;
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}

	return 0;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
		   void *ari, u8 ari_length, const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
				 private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}

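/*
 * Map a received REJ back to a local cm_id. A timeout REJ is matched via
 * the remote CA GUID carried in the ARI; a rejected REQ is matched by our
 * local ID alone, since we never learned the remote's ID; everything else
 * matches on the full local/remote ID pair.
 */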
static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));

	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
		cm_id_priv = cm_find_remote_id(
			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
			remote_id);
	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
		   CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			0);
	else
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		fallthrough;
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->msg);
		fallthrough;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		fallthrough;
	default:
		trace_icm_rej_unknown_err(&cm_id_priv->id);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		fallthrough;
	default:
		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
		ret = -EINVAL;
		goto error_unlock;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg)) {
			ret = PTR_ERR(msg);
			goto error_unlock;
		}

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		trace_icm_send_mra(cm_id);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error_free_msg;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error_free_msg:
	cm_free_msg(msg);
error_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
	default:
		return NULL;
	}
}

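/*
 * Handle an incoming MRA: extend the timeout of the MAD we are retrying by
 * the peer's advertised service timeout, and move the cm_id into the
 * corresponding MRA_*_RCVD state.
 */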
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
	work->cm_event.param.mra_rcvd.service_timeout =
		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(
					&work->port->counters[CM_RECV_DUPLICATES]
							     [CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_MRA_COUNTER]);
		fallthrough;
	default:
		trace_icm_mra_unknown_err(&cm_id_priv->id);
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
					struct sa_path_rec *path)
{
	u32 lid;

	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
					       lap_msg));
		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
					       lap_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
		sa_path_set_dlid(path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
		sa_path_set_slid(path, lid);
	}
}

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
	path->sgid =
		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
	path->flow_label =
		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time =
		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
	cm_format_path_lid_from_lap(lap_msg, path);
}

static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	struct rdma_ah_attr ah_attr;
	struct cm_av alt_av = {};
	int ret;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[0]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
			     work->port->port_num, &work->path[0],
			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
					     lap_msg));
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);

	ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
				      work->port->port_num,
				      work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &ah_attr);
	if (ret)
		goto deref;

	ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
	if (ret) {
		rdma_destroy_ah_attr(&ah_attr);
		goto deref;
	}

	spin_lock_irq(&cm_id_priv->lock);
	cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
			   &ah_attr, &cm_id_priv->av);
	cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_LAP_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_response_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status =
		IBA_GET(CM_APR_AR_STATUS, apr_msg);
	work->cm_event.param.apr_rcvd.apr_info =
		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
	work->cm_event.param.apr_rcvd.info_len =
		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

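/*
 * Timewait expired: if the cm_id is still in TIMEWAIT for this remote QPN,
 * move it back to IDLE and deliver the timewait-exit event.
 */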
3420static int cm_timewait_handler(struct cm_work *work)
3421{
3422 struct cm_timewait_info *timewait_info;
3423 struct cm_id_private *cm_id_priv;
3424
3425 timewait_info = container_of(work, struct cm_timewait_info, work);
3426 spin_lock_irq(&cm.lock);
3427 list_del(&timewait_info->list);
3428 spin_unlock_irq(&cm.lock);
3429
3430 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3431 timewait_info->work.remote_id);
3432 if (!cm_id_priv)
3433 return -EINVAL;
3434
3435 spin_lock_irq(&cm_id_priv->lock);
3436 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3437 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3438 spin_unlock_irq(&cm_id_priv->lock);
3439 goto out;
3440 }
3441 cm_id_priv->id.state = IB_CM_IDLE;
3442 cm_queue_work_unlock(cm_id_priv, work);
3443 return 0;
3444out:
3445 cm_deref_id(cm_id_priv);
3446 return -EINVAL;
3447}
3448
3449static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3450 struct cm_id_private *cm_id_priv,
3451 struct ib_cm_sidr_req_param *param)
3452{
3453 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3454 cm_form_tid(cm_id_priv));
3455 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3456 be32_to_cpu(cm_id_priv->id.local_id));
3457 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3458 be16_to_cpu(param->path->pkey));
3459 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3460 be64_to_cpu(param->service_id));
3461
3462 if (param->private_data && param->private_data_len)
3463 IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3464 param->private_data, param->private_data_len);
3465}
3466
3467int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3468 struct ib_cm_sidr_req_param *param)
3469{
3470 struct cm_id_private *cm_id_priv;
3471 struct ib_mad_send_buf *msg;
3472 struct cm_av av = {};
3473 unsigned long flags;
3474 int ret;
3475
3476 if (!param->path || (param->private_data &&
3477 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3478 return -EINVAL;
3479
3480 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3481 ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
3482 if (ret)
3483 return ret;
3484
3485 spin_lock_irqsave(&cm_id_priv->lock, flags);
3486 cm_move_av_from_path(&cm_id_priv->av, &av);
3487 cm_id->service_id = param->service_id;
3488 cm_id_priv->timeout_ms = param->timeout_ms;
3489 cm_id_priv->max_cm_retries = param->max_cm_retries;
3490 if (cm_id->state != IB_CM_IDLE) {
3491 ret = -EINVAL;
3492 goto out_unlock;
3493 }
3494
3495 msg = cm_alloc_priv_msg(cm_id_priv);
3496 if (IS_ERR(msg)) {
3497 ret = PTR_ERR(msg);
3498 goto out_unlock;
3499 }
3500
3501 cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
3502 param);
3503 msg->timeout_ms = cm_id_priv->timeout_ms;
3504 msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
3505
3506 trace_icm_send_sidr_req(&cm_id_priv->id);
3507 ret = ib_post_send_mad(msg, NULL);
3508 if (ret)
3509 goto out_free;
3510 cm_id->state = IB_CM_SIDR_REQ_SENT;
3511 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3512 return 0;
3513out_free:
3514 cm_free_priv_msg(msg);
3515out_unlock:
3516 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3517 return ret;
3518}
3519EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     const struct cm_id_private *rx_cm_id,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
	param->listen_id = listen_id;
	param->service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}

static int cm_sidr_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	cm_id_priv->tid = sidr_req_msg->hdr.tid;

	wc = work->mad_recv_wc->wc;
	cm_id_priv->sidr_slid = wc->slid;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
				[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;

	/*
	 * A SIDR ID does not need to be in the xarray since it does not receive
	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
	 * not enter timewait.
	 */

	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);
	/*
	 * A pointer to the listen_cm_id is held in the event, so this deref
	 * must be after the event is delivered above.
	 */
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
		be64_to_cpu(cm_id_priv->id.service_id));
	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
		param->ece.vendor_id & 0xFF);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
		(param->ece.vendor_id >> 8) & 0xFF);

	if (param->info && param->info_length)
		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
			    param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}

static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
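
/*
 * A minimal sketch of a listener answering a SIDR REQ from its
 * IB_CM_SIDR_REQ_RECEIVED callback, assuming "qp" is a hypothetical UD QP
 * owned by the service and MY_SERVICE_QKEY is its (hypothetical) Q_Key:
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.qp_num = qp->qp_num,
 *		.qkey = MY_SERVICE_QKEY,
 *		.status = IB_SIDR_SUCCESS,
 *	};
 *
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */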

static void cm_format_sidr_rep_event(struct cm_work *work,
				     const struct cm_id_private *cm_id_priv)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_process_send_error(struct cm_id_private *cm_id_priv,
				  struct ib_mad_send_buf *msg,
				  enum ib_cm_state state,
				  enum ib_wc_status wc_status)
{
	struct ib_cm_event cm_event = {};
	int ret;

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	if (msg != cm_id_priv->msg) {
		spin_unlock_irq(&cm_id_priv->lock);
		cm_free_msg(msg);
		return;
	}
	cm_free_priv_msg(msg);

	if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
	    wc_status == IB_WC_WR_FLUSH_ERR)
		goto out_unlock;

	trace_icm_mad_send_err(state, wc_status);
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto out_unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
out_unlock:
	spin_unlock_irq(&cm_id_priv->lock);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_id_private *cm_id_priv = msg->context[0];
	enum ib_cm_state state =
		(enum ib_cm_state)(unsigned long)msg->context[1];
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counters[CM_XMIT_RETRIES][attr_index]);

	if (cm_id_priv)
		cm_process_send_error(cm_id_priv, msg, state,
				      mad_send_wc->status);
	else
		cm_free_response_msg(msg);
}

static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		trace_icm_handler_err(work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		trace_icm_establish_err(cm_id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
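
/*
 * A minimal sketch of the expected caller: a consumer that routes its QP's
 * async events to the CM (the handler below and the cm_id stashed in the QP
 * context are hypothetical):
 *
 *	static void qp_event_handler(struct ib_event *event, void *context)
 *	{
 *		struct ib_cm_id *cm_id = context;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 */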

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);

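	/*
	 * REQ carries one or two path records and LAP carries one, so size
	 * the work item with struct_size() to reserve trailing path entries;
	 * all other message types need no path storage.
	 */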
	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
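		/*
		 * Remote read and atomic access (plus flush access when the
		 * device advertises it) are granted only when the peer
		 * requested responder resources.
		 */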
		if (cm_id_priv->responder_resources) {
			struct ib_device *ib_dev = cm_id_priv->id.device;
			u64 support_flush = ib_dev->attrs.device_cap_flags &
			  (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
			u32 flushable = support_flush ?
					(IB_ACCESS_FLUSH_GLOBAL |
					 IB_ACCESS_FLUSH_PERSISTENT) : 0;

			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC |
						    flushable;
		}
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		if (cm_id_priv->av.port)
			qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
		    cm_id_priv->av.dlid_datapath &&
		    (cm_id_priv->av.dlid_datapath != 0xffff))
			qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
		    cm_id_priv->alt_av.port) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			if (cm_id_priv->alt_av.port)
				qp_attr->alt_port_num =
					cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
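
/*
 * A minimal sketch of how a consumer typically drives its connection QP
 * through INIT -> RTR -> RTS using the attribute helpers above ("qp" and
 * "cm_id" are hypothetical; error handling is elided):
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */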

static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	struct cm_counter_attribute *cm_attr =
		container_of(attr, struct cm_counter_attribute, attr);
	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);

	if (WARN_ON(!cm_dev))
		return -EINVAL;

	return sysfs_emit(
		buf, "%ld\n",
		atomic_long_read(
			&cm_dev->port[port_num - 1]
				 ->counters[cm_attr->group][cm_attr->index]));
}

#define CM_COUNTER_ATTR(_name, _group, _index)				\
	{								\
		.attr = __ATTR(_name, 0444, cm_show_counter, NULL),	\
		.group = _group, .index = _index			\
	}

#define CM_COUNTER_GROUP(_group, _name)					\
	static struct cm_counter_attribute cm_counter_attr_##_group[] = { \
		CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER),		\
		CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER),		\
		CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER),		\
		CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER),		\
		CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER),		\
		CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER),		\
		CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER),		\
		CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER),	\
		CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER),	\
		CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER),		\
		CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER),		\
	};								\
	static struct attribute *cm_counter_attrs_##_group[] = {	\
		&cm_counter_attr_##_group[0].attr.attr,			\
		&cm_counter_attr_##_group[1].attr.attr,			\
		&cm_counter_attr_##_group[2].attr.attr,			\
		&cm_counter_attr_##_group[3].attr.attr,			\
		&cm_counter_attr_##_group[4].attr.attr,			\
		&cm_counter_attr_##_group[5].attr.attr,			\
		&cm_counter_attr_##_group[6].attr.attr,			\
		&cm_counter_attr_##_group[7].attr.attr,			\
		&cm_counter_attr_##_group[8].attr.attr,			\
		&cm_counter_attr_##_group[9].attr.attr,			\
		&cm_counter_attr_##_group[10].attr.attr,		\
		NULL,							\
	};								\
	static const struct attribute_group cm_counter_group_##_group = { \
		.name = _name,						\
		.attrs = cm_counter_attrs_##_group,			\
	};

CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
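
/*
 * These four groups surface the per-port CM message counters through the
 * port sysfs directory; e.g. the count of received REQs on port 1 is
 * expected at /sys/class/infiniband/<device>/ports/1/cm_rx_msgs/req.
 */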

static const struct attribute_group *cm_counter_groups[] = {
	&cm_counter_group_CM_XMIT,
	&cm_counter_group_CM_XMIT_RETRIES,
	&cm_counter_group_CM_RECV,
	&cm_counter_group_CM_RECV_DUPLICATES,
	NULL,
};

static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u32 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	kref_init(&cm_dev->kref);
	spin_lock_init(&cm_dev->mad_agent_lock);
	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
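
	/*
	 * For each CM-capable port, register a GSI MAD agent to receive
	 * CM-class MADs (method SEND) and advertise CM support by setting
	 * IB_PORT_CM_SUP in the port capability mask.
	 */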
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		ret = ib_port_register_client_groups(ib_device, i,
						     cm_counter_groups);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
	}
free:
	cm_device_put(cm_dev);
	return ret;
}

static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	u32 i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		struct ib_mad_agent *mad_agent;

		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		mad_agent = port->mad_agent;
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/*
		 * Flush the workqueue here, after going_down has been set;
		 * this guarantees that no new work will be queued by the
		 * recv handler, after which it is safe to unregister the
		 * MAD agent.
		 */
		flush_workqueue(cm.wq);
		/*
		 * The above ensures no call paths from the work are running;
		 * the remaining paths all take the mad_agent_lock.
		 */
		spin_lock(&cm_dev->mad_agent_lock);
		port->mad_agent = NULL;
		spin_unlock(&cm_dev->mad_agent_lock);
		ib_unregister_mad_agent(mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
	}

	cm_device_put(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
1/*
2 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/completion.h>
37#include <linux/dma-mapping.h>
38#include <linux/device.h>
39#include <linux/err.h>
40#include <linux/idr.h>
41#include <linux/interrupt.h>
42#include <linux/random.h>
43#include <linux/rbtree.h>
44#include <linux/spinlock.h>
45#include <linux/slab.h>
46#include <linux/sysfs.h>
47#include <linux/workqueue.h>
48#include <linux/kdev_t.h>
49
50#include <rdma/ib_cache.h>
51#include <rdma/ib_cm.h>
52#include "cm_msgs.h"
53
54MODULE_AUTHOR("Sean Hefty");
55MODULE_DESCRIPTION("InfiniBand CM");
56MODULE_LICENSE("Dual BSD/GPL");
57
58static void cm_add_one(struct ib_device *device);
59static void cm_remove_one(struct ib_device *device);
60
61static struct ib_client cm_client = {
62 .name = "cm",
63 .add = cm_add_one,
64 .remove = cm_remove_one
65};
66
67static struct ib_cm {
68 spinlock_t lock;
69 struct list_head device_list;
70 rwlock_t device_lock;
71 struct rb_root listen_service_table;
72 u64 listen_service_id;
73 /* struct rb_root peer_service_table; todo: fix peer to peer */
74 struct rb_root remote_qp_table;
75 struct rb_root remote_id_table;
76 struct rb_root remote_sidr_table;
77 struct idr local_id_table;
78 __be32 random_id_operand;
79 struct list_head timewait_list;
80 struct workqueue_struct *wq;
81} cm;
82
83/* Counter indexes ordered by attribute ID */
84enum {
85 CM_REQ_COUNTER,
86 CM_MRA_COUNTER,
87 CM_REJ_COUNTER,
88 CM_REP_COUNTER,
89 CM_RTU_COUNTER,
90 CM_DREQ_COUNTER,
91 CM_DREP_COUNTER,
92 CM_SIDR_REQ_COUNTER,
93 CM_SIDR_REP_COUNTER,
94 CM_LAP_COUNTER,
95 CM_APR_COUNTER,
96 CM_ATTR_COUNT,
97 CM_ATTR_ID_OFFSET = 0x0010,
98};
99
100enum {
101 CM_XMIT,
102 CM_XMIT_RETRIES,
103 CM_RECV,
104 CM_RECV_DUPLICATES,
105 CM_COUNTER_GROUPS
106};
107
108static char const counter_group_names[CM_COUNTER_GROUPS]
109 [sizeof("cm_rx_duplicates")] = {
110 "cm_tx_msgs", "cm_tx_retries",
111 "cm_rx_msgs", "cm_rx_duplicates"
112};
113
114struct cm_counter_group {
115 struct kobject obj;
116 atomic_long_t counter[CM_ATTR_COUNT];
117};
118
119struct cm_counter_attribute {
120 struct attribute attr;
121 int index;
122};
123
124#define CM_COUNTER_ATTR(_name, _index) \
125struct cm_counter_attribute cm_##_name##_counter_attr = { \
126 .attr = { .name = __stringify(_name), .mode = 0444 }, \
127 .index = _index \
128}
129
130static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
131static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
132static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
133static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
134static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
135static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
136static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
137static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
138static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
139static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
140static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
141
142static struct attribute *cm_counter_default_attrs[] = {
143 &cm_req_counter_attr.attr,
144 &cm_mra_counter_attr.attr,
145 &cm_rej_counter_attr.attr,
146 &cm_rep_counter_attr.attr,
147 &cm_rtu_counter_attr.attr,
148 &cm_dreq_counter_attr.attr,
149 &cm_drep_counter_attr.attr,
150 &cm_sidr_req_counter_attr.attr,
151 &cm_sidr_rep_counter_attr.attr,
152 &cm_lap_counter_attr.attr,
153 &cm_apr_counter_attr.attr,
154 NULL
155};
156
157struct cm_port {
158 struct cm_device *cm_dev;
159 struct ib_mad_agent *mad_agent;
160 struct kobject port_obj;
161 u8 port_num;
162 struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
163};
164
165struct cm_device {
166 struct list_head list;
167 struct ib_device *ib_device;
168 struct device *device;
169 u8 ack_delay;
170 struct cm_port *port[0];
171};
172
173struct cm_av {
174 struct cm_port *port;
175 union ib_gid dgid;
176 struct ib_ah_attr ah_attr;
177 u16 pkey_index;
178 u8 timeout;
179};
180
181struct cm_work {
182 struct delayed_work work;
183 struct list_head list;
184 struct cm_port *port;
185 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
186 __be32 local_id; /* Established / timewait */
187 __be32 remote_id;
188 struct ib_cm_event cm_event;
189 struct ib_sa_path_rec path[0];
190};
191
192struct cm_timewait_info {
193 struct cm_work work; /* Must be first. */
194 struct list_head list;
195 struct rb_node remote_qp_node;
196 struct rb_node remote_id_node;
197 __be64 remote_ca_guid;
198 __be32 remote_qpn;
199 u8 inserted_remote_qp;
200 u8 inserted_remote_id;
201};
202
203struct cm_id_private {
204 struct ib_cm_id id;
205
206 struct rb_node service_node;
207 struct rb_node sidr_id_node;
208 spinlock_t lock; /* Do not acquire inside cm.lock */
209 struct completion comp;
210 atomic_t refcount;
211
212 struct ib_mad_send_buf *msg;
213 struct cm_timewait_info *timewait_info;
214 /* todo: use alternate port on send failure */
215 struct cm_av av;
216 struct cm_av alt_av;
217 struct ib_cm_compare_data *compare_data;
218
219 void *private_data;
220 __be64 tid;
221 __be32 local_qpn;
222 __be32 remote_qpn;
223 enum ib_qp_type qp_type;
224 __be32 sq_psn;
225 __be32 rq_psn;
226 int timeout_ms;
227 enum ib_mtu path_mtu;
228 __be16 pkey;
229 u8 private_data_len;
230 u8 max_cm_retries;
231 u8 peer_to_peer;
232 u8 responder_resources;
233 u8 initiator_depth;
234 u8 retry_count;
235 u8 rnr_retry_count;
236 u8 service_timeout;
237 u8 target_ack_delay;
238
239 struct list_head work_list;
240 atomic_t work_count;
241};
242
243static void cm_work_handler(struct work_struct *work);
244
245static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
246{
247 if (atomic_dec_and_test(&cm_id_priv->refcount))
248 complete(&cm_id_priv->comp);
249}
250
251static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
252 struct ib_mad_send_buf **msg)
253{
254 struct ib_mad_agent *mad_agent;
255 struct ib_mad_send_buf *m;
256 struct ib_ah *ah;
257
258 mad_agent = cm_id_priv->av.port->mad_agent;
259 ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
260 if (IS_ERR(ah))
261 return PTR_ERR(ah);
262
263 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
264 cm_id_priv->av.pkey_index,
265 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
266 GFP_ATOMIC);
267 if (IS_ERR(m)) {
268 ib_destroy_ah(ah);
269 return PTR_ERR(m);
270 }
271
272 /* Timeout set by caller if response is expected. */
273 m->ah = ah;
274 m->retries = cm_id_priv->max_cm_retries;
275
276 atomic_inc(&cm_id_priv->refcount);
277 m->context[0] = cm_id_priv;
278 *msg = m;
279 return 0;
280}
281
282static int cm_alloc_response_msg(struct cm_port *port,
283 struct ib_mad_recv_wc *mad_recv_wc,
284 struct ib_mad_send_buf **msg)
285{
286 struct ib_mad_send_buf *m;
287 struct ib_ah *ah;
288
289 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
290 mad_recv_wc->recv_buf.grh, port->port_num);
291 if (IS_ERR(ah))
292 return PTR_ERR(ah);
293
294 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
295 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
296 GFP_ATOMIC);
297 if (IS_ERR(m)) {
298 ib_destroy_ah(ah);
299 return PTR_ERR(m);
300 }
301 m->ah = ah;
302 *msg = m;
303 return 0;
304}
305
306static void cm_free_msg(struct ib_mad_send_buf *msg)
307{
308 ib_destroy_ah(msg->ah);
309 if (msg->context[0])
310 cm_deref_id(msg->context[0]);
311 ib_free_send_mad(msg);
312}
313
314static void * cm_copy_private_data(const void *private_data,
315 u8 private_data_len)
316{
317 void *data;
318
319 if (!private_data || !private_data_len)
320 return NULL;
321
322 data = kmemdup(private_data, private_data_len, GFP_KERNEL);
323 if (!data)
324 return ERR_PTR(-ENOMEM);
325
326 return data;
327}
328
329static void cm_set_private_data(struct cm_id_private *cm_id_priv,
330 void *private_data, u8 private_data_len)
331{
332 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
333 kfree(cm_id_priv->private_data);
334
335 cm_id_priv->private_data = private_data;
336 cm_id_priv->private_data_len = private_data_len;
337}
338
339static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
340 struct ib_grh *grh, struct cm_av *av)
341{
342 av->port = port;
343 av->pkey_index = wc->pkey_index;
344 ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
345 grh, &av->ah_attr);
346}
347
348static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
349{
350 struct cm_device *cm_dev;
351 struct cm_port *port = NULL;
352 unsigned long flags;
353 int ret;
354 u8 p;
355
356 read_lock_irqsave(&cm.device_lock, flags);
357 list_for_each_entry(cm_dev, &cm.device_list, list) {
358 if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
359 &p, NULL)) {
360 port = cm_dev->port[p-1];
361 break;
362 }
363 }
364 read_unlock_irqrestore(&cm.device_lock, flags);
365
366 if (!port)
367 return -EINVAL;
368
369 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
370 be16_to_cpu(path->pkey), &av->pkey_index);
371 if (ret)
372 return ret;
373
374 av->port = port;
375 ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
376 &av->ah_attr);
377 av->timeout = path->packet_life_time + 1;
378 return 0;
379}
380
381static int cm_alloc_id(struct cm_id_private *cm_id_priv)
382{
383 unsigned long flags;
384 int ret, id;
385 static int next_id;
386
387 do {
388 spin_lock_irqsave(&cm.lock, flags);
389 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
390 next_id, &id);
391 if (!ret)
392 next_id = ((unsigned) id + 1) & MAX_ID_MASK;
393 spin_unlock_irqrestore(&cm.lock, flags);
394 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
395
396 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
397 return ret;
398}
399
400static void cm_free_id(__be32 local_id)
401{
402 spin_lock_irq(&cm.lock);
403 idr_remove(&cm.local_id_table,
404 (__force int) (local_id ^ cm.random_id_operand));
405 spin_unlock_irq(&cm.lock);
406}
407
408static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
409{
410 struct cm_id_private *cm_id_priv;
411
412 cm_id_priv = idr_find(&cm.local_id_table,
413 (__force int) (local_id ^ cm.random_id_operand));
414 if (cm_id_priv) {
415 if (cm_id_priv->id.remote_id == remote_id)
416 atomic_inc(&cm_id_priv->refcount);
417 else
418 cm_id_priv = NULL;
419 }
420
421 return cm_id_priv;
422}
423
424static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
425{
426 struct cm_id_private *cm_id_priv;
427
428 spin_lock_irq(&cm.lock);
429 cm_id_priv = cm_get_id(local_id, remote_id);
430 spin_unlock_irq(&cm.lock);
431
432 return cm_id_priv;
433}
434
435static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
436{
437 int i;
438
439 for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
440 ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
441 ((unsigned long *) mask)[i];
442}
443
444static int cm_compare_data(struct ib_cm_compare_data *src_data,
445 struct ib_cm_compare_data *dst_data)
446{
447 u8 src[IB_CM_COMPARE_SIZE];
448 u8 dst[IB_CM_COMPARE_SIZE];
449
450 if (!src_data || !dst_data)
451 return 0;
452
453 cm_mask_copy(src, src_data->data, dst_data->mask);
454 cm_mask_copy(dst, dst_data->data, src_data->mask);
455 return memcmp(src, dst, IB_CM_COMPARE_SIZE);
456}
457
458static int cm_compare_private_data(u8 *private_data,
459 struct ib_cm_compare_data *dst_data)
460{
461 u8 src[IB_CM_COMPARE_SIZE];
462
463 if (!dst_data)
464 return 0;
465
466 cm_mask_copy(src, private_data, dst_data->mask);
467 return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
468}
469
470/*
471 * Trivial helpers to strip endian annotation and compare; the
472 * endianness doesn't actually matter since we just need a stable
473 * order for the RB tree.
474 */
475static int be32_lt(__be32 a, __be32 b)
476{
477 return (__force u32) a < (__force u32) b;
478}
479
480static int be32_gt(__be32 a, __be32 b)
481{
482 return (__force u32) a > (__force u32) b;
483}
484
485static int be64_lt(__be64 a, __be64 b)
486{
487 return (__force u64) a < (__force u64) b;
488}
489
490static int be64_gt(__be64 a, __be64 b)
491{
492 return (__force u64) a > (__force u64) b;
493}
494
495static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
496{
497 struct rb_node **link = &cm.listen_service_table.rb_node;
498 struct rb_node *parent = NULL;
499 struct cm_id_private *cur_cm_id_priv;
500 __be64 service_id = cm_id_priv->id.service_id;
501 __be64 service_mask = cm_id_priv->id.service_mask;
502 int data_cmp;
503
504 while (*link) {
505 parent = *link;
506 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
507 service_node);
508 data_cmp = cm_compare_data(cm_id_priv->compare_data,
509 cur_cm_id_priv->compare_data);
510 if ((cur_cm_id_priv->id.service_mask & service_id) ==
511 (service_mask & cur_cm_id_priv->id.service_id) &&
512 (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
513 !data_cmp)
514 return cur_cm_id_priv;
515
516 if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
517 link = &(*link)->rb_left;
518 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
519 link = &(*link)->rb_right;
520 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
521 link = &(*link)->rb_left;
522 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
523 link = &(*link)->rb_right;
524 else if (data_cmp < 0)
525 link = &(*link)->rb_left;
526 else
527 link = &(*link)->rb_right;
528 }
529 rb_link_node(&cm_id_priv->service_node, parent, link);
530 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
531 return NULL;
532}
533
534static struct cm_id_private * cm_find_listen(struct ib_device *device,
535 __be64 service_id,
536 u8 *private_data)
537{
538 struct rb_node *node = cm.listen_service_table.rb_node;
539 struct cm_id_private *cm_id_priv;
540 int data_cmp;
541
542 while (node) {
543 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
544 data_cmp = cm_compare_private_data(private_data,
545 cm_id_priv->compare_data);
546 if ((cm_id_priv->id.service_mask & service_id) ==
547 cm_id_priv->id.service_id &&
548 (cm_id_priv->id.device == device) && !data_cmp)
549 return cm_id_priv;
550
551 if (device < cm_id_priv->id.device)
552 node = node->rb_left;
553 else if (device > cm_id_priv->id.device)
554 node = node->rb_right;
555 else if (be64_lt(service_id, cm_id_priv->id.service_id))
556 node = node->rb_left;
557 else if (be64_gt(service_id, cm_id_priv->id.service_id))
558 node = node->rb_right;
559 else if (data_cmp < 0)
560 node = node->rb_left;
561 else
562 node = node->rb_right;
563 }
564 return NULL;
565}
566
567static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
568 *timewait_info)
569{
570 struct rb_node **link = &cm.remote_id_table.rb_node;
571 struct rb_node *parent = NULL;
572 struct cm_timewait_info *cur_timewait_info;
573 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
574 __be32 remote_id = timewait_info->work.remote_id;
575
576 while (*link) {
577 parent = *link;
578 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
579 remote_id_node);
580 if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
581 link = &(*link)->rb_left;
582 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
583 link = &(*link)->rb_right;
584 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
585 link = &(*link)->rb_left;
586 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
587 link = &(*link)->rb_right;
588 else
589 return cur_timewait_info;
590 }
591 timewait_info->inserted_remote_id = 1;
592 rb_link_node(&timewait_info->remote_id_node, parent, link);
593 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
594 return NULL;
595}
596
597static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
598 __be32 remote_id)
599{
600 struct rb_node *node = cm.remote_id_table.rb_node;
601 struct cm_timewait_info *timewait_info;
602
603 while (node) {
604 timewait_info = rb_entry(node, struct cm_timewait_info,
605 remote_id_node);
606 if (be32_lt(remote_id, timewait_info->work.remote_id))
607 node = node->rb_left;
608 else if (be32_gt(remote_id, timewait_info->work.remote_id))
609 node = node->rb_right;
610 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
611 node = node->rb_left;
612 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
613 node = node->rb_right;
614 else
615 return timewait_info;
616 }
617 return NULL;
618}
619
620static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
621 *timewait_info)
622{
623 struct rb_node **link = &cm.remote_qp_table.rb_node;
624 struct rb_node *parent = NULL;
625 struct cm_timewait_info *cur_timewait_info;
626 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
627 __be32 remote_qpn = timewait_info->remote_qpn;
628
629 while (*link) {
630 parent = *link;
631 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
632 remote_qp_node);
633 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
634 link = &(*link)->rb_left;
635 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
636 link = &(*link)->rb_right;
637 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
638 link = &(*link)->rb_left;
639 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
640 link = &(*link)->rb_right;
641 else
642 return cur_timewait_info;
643 }
644 timewait_info->inserted_remote_qp = 1;
645 rb_link_node(&timewait_info->remote_qp_node, parent, link);
646 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
647 return NULL;
648}
649
650static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
651 *cm_id_priv)
652{
653 struct rb_node **link = &cm.remote_sidr_table.rb_node;
654 struct rb_node *parent = NULL;
655 struct cm_id_private *cur_cm_id_priv;
656 union ib_gid *port_gid = &cm_id_priv->av.dgid;
657 __be32 remote_id = cm_id_priv->id.remote_id;
658
659 while (*link) {
660 parent = *link;
661 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
662 sidr_id_node);
663 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
664 link = &(*link)->rb_left;
665 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
666 link = &(*link)->rb_right;
667 else {
668 int cmp;
669 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
670 sizeof *port_gid);
671 if (cmp < 0)
672 link = &(*link)->rb_left;
673 else if (cmp > 0)
674 link = &(*link)->rb_right;
675 else
676 return cur_cm_id_priv;
677 }
678 }
679 rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
680 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
681 return NULL;
682}
683
684static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
685 enum ib_cm_sidr_status status)
686{
687 struct ib_cm_sidr_rep_param param;
688
689 memset(¶m, 0, sizeof param);
690 param.status = status;
691 ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m);
692}
693
694struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
695 ib_cm_handler cm_handler,
696 void *context)
697{
698 struct cm_id_private *cm_id_priv;
699 int ret;
700
701 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
702 if (!cm_id_priv)
703 return ERR_PTR(-ENOMEM);
704
705 cm_id_priv->id.state = IB_CM_IDLE;
706 cm_id_priv->id.device = device;
707 cm_id_priv->id.cm_handler = cm_handler;
708 cm_id_priv->id.context = context;
709 cm_id_priv->id.remote_cm_qpn = 1;
710 ret = cm_alloc_id(cm_id_priv);
711 if (ret)
712 goto error;
713
714 spin_lock_init(&cm_id_priv->lock);
715 init_completion(&cm_id_priv->comp);
716 INIT_LIST_HEAD(&cm_id_priv->work_list);
717 atomic_set(&cm_id_priv->work_count, -1);
718 atomic_set(&cm_id_priv->refcount, 1);
719 return &cm_id_priv->id;
720
721error:
722 kfree(cm_id_priv);
723 return ERR_PTR(-ENOMEM);
724}
725EXPORT_SYMBOL(ib_create_cm_id);
726
727static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
728{
729 struct cm_work *work;
730
731 if (list_empty(&cm_id_priv->work_list))
732 return NULL;
733
734 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
735 list_del(&work->list);
736 return work;
737}
738
739static void cm_free_work(struct cm_work *work)
740{
741 if (work->mad_recv_wc)
742 ib_free_recv_mad(work->mad_recv_wc);
743 kfree(work);
744}
745
746static inline int cm_convert_to_ms(int iba_time)
747{
748 /* approximate conversion to ms from 4.096us x 2^iba_time */
749 return 1 << max(iba_time - 8, 0);
750}
751
752/*
753 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
754 * Because of how ack_timeout is stored, adding one doubles the timeout.
755 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
756 * increment it (round up) only if the other is within 50%.
757 */
758static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
759{
760 int ack_timeout = packet_life_time + 1;
761
762 if (ack_timeout >= ca_ack_delay)
763 ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
764 else
765 ack_timeout = ca_ack_delay +
766 (ack_timeout >= (ca_ack_delay - 1));
767
768 return min(31, ack_timeout);
769}
770
771static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
772{
773 if (timewait_info->inserted_remote_id) {
774 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
775 timewait_info->inserted_remote_id = 0;
776 }
777
778 if (timewait_info->inserted_remote_qp) {
779 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
780 timewait_info->inserted_remote_qp = 0;
781 }
782}
783
784static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
785{
786 struct cm_timewait_info *timewait_info;
787
788 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
789 if (!timewait_info)
790 return ERR_PTR(-ENOMEM);
791
792 timewait_info->work.local_id = local_id;
793 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
794 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
795 return timewait_info;
796}
797
798static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
799{
800 int wait_time;
801 unsigned long flags;
802
803 spin_lock_irqsave(&cm.lock, flags);
804 cm_cleanup_timewait(cm_id_priv->timewait_info);
805 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
806 spin_unlock_irqrestore(&cm.lock, flags);
807
808 /*
809 * The cm_id could be destroyed by the user before we exit timewait.
810 * To protect against this, we search for the cm_id after exiting
811 * timewait before notifying the user that we've exited timewait.
812 */
813 cm_id_priv->id.state = IB_CM_TIMEWAIT;
814 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
815 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
816 msecs_to_jiffies(wait_time));
817 cm_id_priv->timewait_info = NULL;
818}
819
820static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
821{
822 unsigned long flags;
823
824 cm_id_priv->id.state = IB_CM_IDLE;
825 if (cm_id_priv->timewait_info) {
826 spin_lock_irqsave(&cm.lock, flags);
827 cm_cleanup_timewait(cm_id_priv->timewait_info);
828 spin_unlock_irqrestore(&cm.lock, flags);
829 kfree(cm_id_priv->timewait_info);
830 cm_id_priv->timewait_info = NULL;
831 }
832}
833
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irq(&cm_id_priv->lock);
		spin_lock_irq(&cm.lock);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

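/**
 * ib_cm_listen - Begin listening for connection requests on the given
 *   service ID, optionally matching against private data.
 * @cm_id: Connection identifier to use for listening.
 * @service_id: Service ID to listen on, or IB_CM_ASSIGN_SERVICE_ID to have
 *   one assigned from the CM's local range.
 * @service_mask: Mask applied to incoming service IDs before matching;
 *   zero means match the service ID exactly.
 * @compare_data: If non-NULL, a data/mask pair that incoming REQ private
 *   data must match for this listen to accept it.
 */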
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

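/*
 * Build the 64-bit transaction ID for an outgoing MAD. The high 32 bits
 * come from the MAD agent's hi_tid; the low 32 bits are the local comm ID
 * with the message sequence type (e.g. CM_MSG_SEQUENCE_REQ) folded into
 * bits 30-31, keeping concurrent exchanges on one cm_id distinguishable.
 */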
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

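/*
 * Typical active-side usage, as a sketch (error handling and event
 * demultiplexing omitted; the handler and context names are illustrative
 * only):
 *
 *	cm_id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *	ib_send_cm_req(cm_id, &req_param);
 *	... my_cm_handler() receives IB_CM_REP_RECEIVED ...
 *	ib_send_cm_rtu(cm_id, NULL, 0);
 */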
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info =
		cm_create_timewait_info(cm_id_priv->id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

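/*
 * Peer-to-peer tie-breaking: the side with the numerically larger CA GUID
 * (or, on a tie, the larger QPN) is considered the active peer.
 */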
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

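/*
 * Dispatch an event to the consumer's callback and drain any work items
 * that were queued while this one was being handled; work_count going
 * negative in atomic_add_negative() means the queue is empty. A non-zero
 * handler return destroys the cm_id.
 */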
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

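/*
 * A retransmitted REQ matched an existing connection. Re-arm the peer by
 * resending our MRA or, if the connection is already in timewait, reject
 * the stale REQ; anything else is silently dropped.
 */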
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

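/*
 * Match an incoming REQ against local state, under cm.lock: first detect
 * duplicate REQs via the remote ID table, then stale connections via the
 * remote QPN table, and finally look up a listener for the service ID.
 * Returns the referenced listener, or NULL after handling the duplicate
 * or rejecting the request.
 */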
static struct cm_id_private *cm_match_req(struct cm_work *work,
					  struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections. If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

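/*
 * Passive-side REQ processing: create a new cm_id for the connection,
 * match it to a listener, build address handles from the REQ's path
 * records, and hand the request to the listener's callback.
 */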
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info =
		cm_create_timewait_info(cm_id_priv->id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

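/*
 * Handle a retransmitted REP: the connection already progressed past
 * REQ_SENT, so resend our RTU (established) or MRA (MRA_REP_SENT) to quiet
 * the peer, mirroring the duplicate REQ handling above.
 */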
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

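/*
 * Active-side REP processing: validate the connection state, insert the
 * remote ID and QPN into the timewait tables to catch duplicates and stale
 * connections, then record the peer's parameters and negotiated ack
 * timeouts before reporting the event.
 */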
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	cm_id_priv->av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

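/*
 * Disconnect request (DREQ) path. The DREQ carries the remote QPN so the
 * receiver can verify it is tearing down the matching connection.
 */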
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	if (cm_id->lap_state == IB_CM_LAP_SENT ||
	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

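/*
 * Reply to a DREQ for which no cm_id exists (e.g. we already left
 * timewait). A bare DREP is generated from the received MAD so the peer
 * can finish its own teardown.
 */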
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

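/*
 * Find the cm_id targeted by a REJ. For a timeout REJ the sender may not
 * know our comm ID, so the lookup goes through the timewait tables using
 * the remote CA GUID carried in the ARI; otherwise the comm IDs in the
 * message identify the connection directly.
 */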
static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		/* fall through */
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

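/*
 * Send an MRA, or arm a delayed response. If service_timeout has
 * IB_CM_MRA_FLAG_DELAY set, no MRA goes on the wire now; the timeout is
 * only recorded so that an MRA can be generated later for duplicate
 * messages from the peer.
 */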
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		/* fall through */
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

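/*
 * Handle a received MRA: extend the timeout of the outstanding REQ, REP,
 * or LAP via ib_modify_mad() using the service timeout advertised by the
 * peer, and move the state to the corresponding MRA_*_RCVD value.
 */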
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
		cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

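/*
 * Load alternate path (LAP) support. A LAP message proposes a new
 * alternate path for an established connection; the peer accepts or
 * rejects the proposal with an APR.
 */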
2558static void cm_format_lap(struct cm_lap_msg *lap_msg,
2559 struct cm_id_private *cm_id_priv,
2560 struct ib_sa_path_rec *alternate_path,
2561 const void *private_data,
2562 u8 private_data_len)
2563{
2564 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2565 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2566 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2567 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2568 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2569 /* todo: need remote CM response timeout */
2570 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2571 lap_msg->alt_local_lid = alternate_path->slid;
2572 lap_msg->alt_remote_lid = alternate_path->dlid;
2573 lap_msg->alt_local_gid = alternate_path->sgid;
2574 lap_msg->alt_remote_gid = alternate_path->dgid;
2575 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2576 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2577 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2578 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2579 cm_lap_set_sl(lap_msg, alternate_path->sl);
2580 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2581 cm_lap_set_local_ack_timeout(lap_msg,
2582 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2583 alternate_path->packet_life_time));
2584
2585 if (private_data && private_data_len)
2586 memcpy(lap_msg->private_data, private_data, private_data_len);
2587}
2588
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

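/*
 * Build the receiver's view of the alternate path from a received LAP.
 * The LAP describes the path from the sender's perspective, so the
 * local/remote GIDs and LIDs are deliberately swapped here.
 */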
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

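/*
 * Handle a received LAP.  A duplicate of a LAP that was already MRA'd is
 * answered by resending the MRA; an already-queued LAP just bumps the
 * duplicate counter.  Otherwise the id moves to IB_CM_LAP_RCVD and the
 * event is queued to the consumer.
 */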
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	if (cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av))
		goto unlock;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

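/*
 * Send an alternate path response (APR) for a received LAP.  Valid only
 * while a LAP is outstanding (lap_state is IB_CM_LAP_RCVD or
 * IB_CM_MRA_LAP_SENT); on success lap_state returns to IB_CM_LAP_IDLE.
 */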
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

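/*
 * The timewait period for a connection has expired: unlink the timewait
 * info, move the id from IB_CM_TIMEWAIT to IB_CM_IDLE, and deliver the
 * IB_CM_TIMEWAIT_EXIT event to the consumer.
 */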
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->path->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

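/*
 * Send a service ID resolution request (SIDR REQ).  The id must be in
 * IB_CM_IDLE; on success it moves to IB_CM_SIDR_REQ_SENT and the MAD is
 * retried by the MAD layer until a SIDR REP arrives or the request
 * times out.
 */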
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

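/*
 * Handle a received SIDR REQ: create a new cm_id for the request, match
 * it against the listen service table, and deliver the event through
 * the listener's callback.  Duplicates are counted and dropped;
 * unmatched requests are answered with IB_SIDR_UNSUPPORTED.
 */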
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irq(&cm.lock);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

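/*
 * Handle a received SIDR REP: cancel the outstanding SIDR REQ MAD,
 * return the id to IB_CM_IDLE, and report the resolved QPN and Q_Key
 * to the consumer.
 */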
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

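/*
 * A sent MAD completed in error.  If it is still the active message for
 * the id and the id has not changed state, fail the corresponding
 * operation (reset to idle or enter timewait as appropriate) and
 * deliver the matching *_ERROR event; otherwise discard the stale
 * completion.
 */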
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

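/*
 * Workqueue entry point: dispatch a queued CM event to its handler.
 * Handlers return non-zero when the work item was not consumed, in
 * which case it is freed here.
 */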
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

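/*
 * Consumer notification that the connection is established, e.g. when
 * the QP reports IB_EVENT_COMM_EST before the RTU arrives.  The state
 * change is applied immediately; the IB_CM_USER_ESTABLISHED event is
 * delivered from the workqueue.
 */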
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

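/*
 * MAD agent receive callback: map the CM attribute ID to an event,
 * bump the receive counter, and queue a work item so the message is
 * processed outside the MAD receive context.
 */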
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	u16 attr_id;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;
	queue_delayed_work(cm.wq, &work->work, 0);
}

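/*
 * The cm_init_qp_*_attr() helpers fill in the QP attributes required to
 * transition a connection's QP to INIT, RTR, and RTS based on the
 * current connection state; consumers retrieve them through
 * ib_cm_init_qp_attr() below.
 */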
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout = cm_id_priv->av.timeout;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static void cm_get_ack_delay(struct cm_device *cm_dev)
{
	struct ib_device_attr attr;

	if (ib_query_device(cm_dev->ib_device, &attr))
		cm_dev->ack_delay = 0; /* acks will rely on packet life time */
	else
		cm_dev->ack_delay = attr.local_ca_ack_delay;
}

static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

static char *cm_devnode(struct device *dev, mode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
				   &port->cm_dev->device->kobj,
				   "%d", port->port_num);
	if (ret) {
		kfree(port);
		return ret;
	}

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = kobject_init_and_add(&port->counter_group[i].obj,
					   &cm_counter_obj_type,
					   &port->port_obj,
					   "%s", counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		kobject_put(&port->counter_group[i].obj);
	kobject_put(&port->port_obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		kobject_put(&port->counter_group[i].obj);

	kobject_put(&port->port_obj);
}

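/*
 * Client callback invoked for each new IB device: allocate the
 * cm_device, create its sysfs objects, register a MAD agent on every
 * physical port, and advertise IB_PORT_CM_SUP on each port.
 */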
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
		return -EOPNOTSUPP;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	cm_dev->ib_device = ib_device;
	cm_get_ack_delay(cm_dev);

	cm_dev->device = device_create(&cm_class, &ib_device->dev,
				       MKDEV(0, 0), NULL,
				       "%s", ib_device->name);
	if (IS_ERR(cm_dev->device)) {
		ret = PTR_ERR(cm_dev->device);
		kfree(cm_dev);
		return ret;
	}

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
	device_unregister(cm_dev->device);
	kfree(cm_dev);
	return ret;
}

static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		flush_workqueue(cm.wq);
		cm_remove_port_fs(port);
	}
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret)
		return ret;

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error1;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error2;

	return 0;
error2:
	destroy_workqueue(cm.wq);
error1:
	class_unregister(&cm_class);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);