Loading...
1/*
2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/completion.h>
37#include <linux/in.h>
38#include <linux/in6.h>
39#include <linux/mutex.h>
40#include <linux/random.h>
41#include <linux/idr.h>
42#include <linux/inetdevice.h>
43#include <linux/slab.h>
44
45#include <net/tcp.h>
46#include <net/ipv6.h>
47
48#include <rdma/rdma_cm.h>
49#include <rdma/rdma_cm_ib.h>
50#include <rdma/rdma_netlink.h>
51#include <rdma/ib_cache.h>
52#include <rdma/ib_cm.h>
53#include <rdma/ib_sa.h>
54#include <rdma/iw_cm.h>
55
56MODULE_AUTHOR("Sean Hefty");
57MODULE_DESCRIPTION("Generic RDMA CM Agent");
58MODULE_LICENSE("Dual BSD/GPL");
59
60#define CMA_CM_RESPONSE_TIMEOUT 20
61#define CMA_MAX_CM_RETRIES 15
62#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
63#define CMA_IBOE_PACKET_LIFETIME 18
64
65static void cma_add_one(struct ib_device *device);
66static void cma_remove_one(struct ib_device *device);
67
68static struct ib_client cma_client = {
69 .name = "cma",
70 .add = cma_add_one,
71 .remove = cma_remove_one
72};
73
74static struct ib_sa_client sa_client;
75static struct rdma_addr_client addr_client;
76static LIST_HEAD(dev_list);
77static LIST_HEAD(listen_any_list);
78static DEFINE_MUTEX(lock);
79static struct workqueue_struct *cma_wq;
80static DEFINE_IDR(sdp_ps);
81static DEFINE_IDR(tcp_ps);
82static DEFINE_IDR(udp_ps);
83static DEFINE_IDR(ipoib_ps);
84
85struct cma_device {
86 struct list_head list;
87 struct ib_device *device;
88 struct completion comp;
89 atomic_t refcount;
90 struct list_head id_list;
91};
92
93struct rdma_bind_list {
94 struct idr *ps;
95 struct hlist_head owners;
96 unsigned short port;
97};
98
99/*
100 * Device removal can occur at anytime, so we need extra handling to
101 * serialize notifying the user of device removal with other callbacks.
102 * We do this by disabling removal notification while a callback is in process,
103 * and reporting it after the callback completes.
104 */
105struct rdma_id_private {
106 struct rdma_cm_id id;
107
108 struct rdma_bind_list *bind_list;
109 struct hlist_node node;
110 struct list_head list; /* listen_any_list or cma_device.list */
111 struct list_head listen_list; /* per device listens */
112 struct cma_device *cma_dev;
113 struct list_head mc_list;
114
115 int internal_id;
116 enum rdma_cm_state state;
117 spinlock_t lock;
118 struct mutex qp_mutex;
119
120 struct completion comp;
121 atomic_t refcount;
122 struct mutex handler_mutex;
123
124 int backlog;
125 int timeout_ms;
126 struct ib_sa_query *query;
127 int query_id;
128 union {
129 struct ib_cm_id *ib;
130 struct iw_cm_id *iw;
131 } cm_id;
132
133 u32 seq_num;
134 u32 qkey;
135 u32 qp_num;
136 pid_t owner;
137 u8 srq;
138 u8 tos;
139 u8 reuseaddr;
140};
141
142struct cma_multicast {
143 struct rdma_id_private *id_priv;
144 union {
145 struct ib_sa_multicast *ib;
146 } multicast;
147 struct list_head list;
148 void *context;
149 struct sockaddr_storage addr;
150 struct kref mcref;
151};
152
153struct cma_work {
154 struct work_struct work;
155 struct rdma_id_private *id;
156 enum rdma_cm_state old_state;
157 enum rdma_cm_state new_state;
158 struct rdma_cm_event event;
159};
160
161struct cma_ndev_work {
162 struct work_struct work;
163 struct rdma_id_private *id;
164 struct rdma_cm_event event;
165};
166
167struct iboe_mcast_work {
168 struct work_struct work;
169 struct rdma_id_private *id;
170 struct cma_multicast *mc;
171};
172
173union cma_ip_addr {
174 struct in6_addr ip6;
175 struct {
176 __be32 pad[3];
177 __be32 addr;
178 } ip4;
179};
180
181struct cma_hdr {
182 u8 cma_version;
183 u8 ip_version; /* IP version: 7:4 */
184 __be16 port;
185 union cma_ip_addr src_addr;
186 union cma_ip_addr dst_addr;
187};
188
189struct sdp_hh {
190 u8 bsdh[16];
191 u8 sdp_version; /* Major version: 7:4 */
192 u8 ip_version; /* IP version: 7:4 */
193 u8 sdp_specific1[10];
194 __be16 port;
195 __be16 sdp_specific2;
196 union cma_ip_addr src_addr;
197 union cma_ip_addr dst_addr;
198};
199
200struct sdp_hah {
201 u8 bsdh[16];
202 u8 sdp_version;
203};
204
205#define CMA_VERSION 0x00
206#define SDP_MAJ_VERSION 0x2
207
208static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
209{
210 unsigned long flags;
211 int ret;
212
213 spin_lock_irqsave(&id_priv->lock, flags);
214 ret = (id_priv->state == comp);
215 spin_unlock_irqrestore(&id_priv->lock, flags);
216 return ret;
217}
218
219static int cma_comp_exch(struct rdma_id_private *id_priv,
220 enum rdma_cm_state comp, enum rdma_cm_state exch)
221{
222 unsigned long flags;
223 int ret;
224
225 spin_lock_irqsave(&id_priv->lock, flags);
226 if ((ret = (id_priv->state == comp)))
227 id_priv->state = exch;
228 spin_unlock_irqrestore(&id_priv->lock, flags);
229 return ret;
230}
231
232static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
233 enum rdma_cm_state exch)
234{
235 unsigned long flags;
236 enum rdma_cm_state old;
237
238 spin_lock_irqsave(&id_priv->lock, flags);
239 old = id_priv->state;
240 id_priv->state = exch;
241 spin_unlock_irqrestore(&id_priv->lock, flags);
242 return old;
243}
244
245static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
246{
247 return hdr->ip_version >> 4;
248}
249
250static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
251{
252 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
253}
254
255static inline u8 sdp_get_majv(u8 sdp_version)
256{
257 return sdp_version >> 4;
258}
259
260static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
261{
262 return hh->ip_version >> 4;
263}
264
265static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
266{
267 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
268}
269
270static void cma_attach_to_dev(struct rdma_id_private *id_priv,
271 struct cma_device *cma_dev)
272{
273 atomic_inc(&cma_dev->refcount);
274 id_priv->cma_dev = cma_dev;
275 id_priv->id.device = cma_dev->device;
276 id_priv->id.route.addr.dev_addr.transport =
277 rdma_node_get_transport(cma_dev->device->node_type);
278 list_add_tail(&id_priv->list, &cma_dev->id_list);
279}
280
281static inline void cma_deref_dev(struct cma_device *cma_dev)
282{
283 if (atomic_dec_and_test(&cma_dev->refcount))
284 complete(&cma_dev->comp);
285}
286
287static inline void release_mc(struct kref *kref)
288{
289 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
290
291 kfree(mc->multicast.ib);
292 kfree(mc);
293}
294
295static void cma_release_dev(struct rdma_id_private *id_priv)
296{
297 mutex_lock(&lock);
298 list_del(&id_priv->list);
299 cma_deref_dev(id_priv->cma_dev);
300 id_priv->cma_dev = NULL;
301 mutex_unlock(&lock);
302}
303
304static int cma_set_qkey(struct rdma_id_private *id_priv)
305{
306 struct ib_sa_mcmember_rec rec;
307 int ret = 0;
308
309 if (id_priv->qkey)
310 return 0;
311
312 switch (id_priv->id.ps) {
313 case RDMA_PS_UDP:
314 id_priv->qkey = RDMA_UDP_QKEY;
315 break;
316 case RDMA_PS_IPOIB:
317 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
318 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
319 id_priv->id.port_num, &rec.mgid,
320 &rec);
321 if (!ret)
322 id_priv->qkey = be32_to_cpu(rec.qkey);
323 break;
324 default:
325 break;
326 }
327 return ret;
328}
329
330static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
331{
332 int i;
333 int err;
334 struct ib_port_attr props;
335 union ib_gid tmp;
336
337 err = ib_query_port(device, port_num, &props);
338 if (err)
339 return 1;
340
341 for (i = 0; i < props.gid_tbl_len; ++i) {
342 err = ib_query_gid(device, port_num, i, &tmp);
343 if (err)
344 return 1;
345 if (!memcmp(&tmp, gid, sizeof tmp))
346 return 0;
347 }
348
349 return -EAGAIN;
350}
351
352static int cma_acquire_dev(struct rdma_id_private *id_priv)
353{
354 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
355 struct cma_device *cma_dev;
356 union ib_gid gid, iboe_gid;
357 int ret = -ENODEV;
358 u8 port;
359 enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
360 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
361
362 if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
363 id_priv->id.ps == RDMA_PS_IPOIB)
364 return -EINVAL;
365
366 mutex_lock(&lock);
367 iboe_addr_get_sgid(dev_addr, &iboe_gid);
368 memcpy(&gid, dev_addr->src_dev_addr +
369 rdma_addr_gid_offset(dev_addr), sizeof gid);
370 list_for_each_entry(cma_dev, &dev_list, list) {
371 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
372 if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
373 if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
374 rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
375 ret = find_gid_port(cma_dev->device, &iboe_gid, port);
376 else
377 ret = find_gid_port(cma_dev->device, &gid, port);
378
379 if (!ret) {
380 id_priv->id.port_num = port;
381 goto out;
382 } else if (ret == 1)
383 break;
384 }
385 }
386 }
387
388out:
389 if (!ret)
390 cma_attach_to_dev(id_priv, cma_dev);
391
392 mutex_unlock(&lock);
393 return ret;
394}
395
396static void cma_deref_id(struct rdma_id_private *id_priv)
397{
398 if (atomic_dec_and_test(&id_priv->refcount))
399 complete(&id_priv->comp);
400}
401
402static int cma_disable_callback(struct rdma_id_private *id_priv,
403 enum rdma_cm_state state)
404{
405 mutex_lock(&id_priv->handler_mutex);
406 if (id_priv->state != state) {
407 mutex_unlock(&id_priv->handler_mutex);
408 return -EINVAL;
409 }
410 return 0;
411}
412
413struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
414 void *context, enum rdma_port_space ps,
415 enum ib_qp_type qp_type)
416{
417 struct rdma_id_private *id_priv;
418
419 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
420 if (!id_priv)
421 return ERR_PTR(-ENOMEM);
422
423 id_priv->owner = task_pid_nr(current);
424 id_priv->state = RDMA_CM_IDLE;
425 id_priv->id.context = context;
426 id_priv->id.event_handler = event_handler;
427 id_priv->id.ps = ps;
428 id_priv->id.qp_type = qp_type;
429 spin_lock_init(&id_priv->lock);
430 mutex_init(&id_priv->qp_mutex);
431 init_completion(&id_priv->comp);
432 atomic_set(&id_priv->refcount, 1);
433 mutex_init(&id_priv->handler_mutex);
434 INIT_LIST_HEAD(&id_priv->listen_list);
435 INIT_LIST_HEAD(&id_priv->mc_list);
436 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
437
438 return &id_priv->id;
439}
440EXPORT_SYMBOL(rdma_create_id);
441
442static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
443{
444 struct ib_qp_attr qp_attr;
445 int qp_attr_mask, ret;
446
447 qp_attr.qp_state = IB_QPS_INIT;
448 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
449 if (ret)
450 return ret;
451
452 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
453 if (ret)
454 return ret;
455
456 qp_attr.qp_state = IB_QPS_RTR;
457 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
458 if (ret)
459 return ret;
460
461 qp_attr.qp_state = IB_QPS_RTS;
462 qp_attr.sq_psn = 0;
463 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
464
465 return ret;
466}
467
468static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
469{
470 struct ib_qp_attr qp_attr;
471 int qp_attr_mask, ret;
472
473 qp_attr.qp_state = IB_QPS_INIT;
474 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
475 if (ret)
476 return ret;
477
478 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
479}
480
481int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
482 struct ib_qp_init_attr *qp_init_attr)
483{
484 struct rdma_id_private *id_priv;
485 struct ib_qp *qp;
486 int ret;
487
488 id_priv = container_of(id, struct rdma_id_private, id);
489 if (id->device != pd->device)
490 return -EINVAL;
491
492 qp = ib_create_qp(pd, qp_init_attr);
493 if (IS_ERR(qp))
494 return PTR_ERR(qp);
495
496 if (id->qp_type == IB_QPT_UD)
497 ret = cma_init_ud_qp(id_priv, qp);
498 else
499 ret = cma_init_conn_qp(id_priv, qp);
500 if (ret)
501 goto err;
502
503 id->qp = qp;
504 id_priv->qp_num = qp->qp_num;
505 id_priv->srq = (qp->srq != NULL);
506 return 0;
507err:
508 ib_destroy_qp(qp);
509 return ret;
510}
511EXPORT_SYMBOL(rdma_create_qp);
512
513void rdma_destroy_qp(struct rdma_cm_id *id)
514{
515 struct rdma_id_private *id_priv;
516
517 id_priv = container_of(id, struct rdma_id_private, id);
518 mutex_lock(&id_priv->qp_mutex);
519 ib_destroy_qp(id_priv->id.qp);
520 id_priv->id.qp = NULL;
521 mutex_unlock(&id_priv->qp_mutex);
522}
523EXPORT_SYMBOL(rdma_destroy_qp);
524
525static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
526 struct rdma_conn_param *conn_param)
527{
528 struct ib_qp_attr qp_attr;
529 int qp_attr_mask, ret;
530
531 mutex_lock(&id_priv->qp_mutex);
532 if (!id_priv->id.qp) {
533 ret = 0;
534 goto out;
535 }
536
537 /* Need to update QP attributes from default values. */
538 qp_attr.qp_state = IB_QPS_INIT;
539 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
540 if (ret)
541 goto out;
542
543 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
544 if (ret)
545 goto out;
546
547 qp_attr.qp_state = IB_QPS_RTR;
548 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
549 if (ret)
550 goto out;
551
552 if (conn_param)
553 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
554 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
555out:
556 mutex_unlock(&id_priv->qp_mutex);
557 return ret;
558}
559
560static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
561 struct rdma_conn_param *conn_param)
562{
563 struct ib_qp_attr qp_attr;
564 int qp_attr_mask, ret;
565
566 mutex_lock(&id_priv->qp_mutex);
567 if (!id_priv->id.qp) {
568 ret = 0;
569 goto out;
570 }
571
572 qp_attr.qp_state = IB_QPS_RTS;
573 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
574 if (ret)
575 goto out;
576
577 if (conn_param)
578 qp_attr.max_rd_atomic = conn_param->initiator_depth;
579 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
580out:
581 mutex_unlock(&id_priv->qp_mutex);
582 return ret;
583}
584
585static int cma_modify_qp_err(struct rdma_id_private *id_priv)
586{
587 struct ib_qp_attr qp_attr;
588 int ret;
589
590 mutex_lock(&id_priv->qp_mutex);
591 if (!id_priv->id.qp) {
592 ret = 0;
593 goto out;
594 }
595
596 qp_attr.qp_state = IB_QPS_ERR;
597 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
598out:
599 mutex_unlock(&id_priv->qp_mutex);
600 return ret;
601}
602
603static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
604 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
605{
606 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
607 int ret;
608 u16 pkey;
609
610 if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
611 IB_LINK_LAYER_INFINIBAND)
612 pkey = ib_addr_get_pkey(dev_addr);
613 else
614 pkey = 0xffff;
615
616 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
617 pkey, &qp_attr->pkey_index);
618 if (ret)
619 return ret;
620
621 qp_attr->port_num = id_priv->id.port_num;
622 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
623
624 if (id_priv->id.qp_type == IB_QPT_UD) {
625 ret = cma_set_qkey(id_priv);
626 if (ret)
627 return ret;
628
629 qp_attr->qkey = id_priv->qkey;
630 *qp_attr_mask |= IB_QP_QKEY;
631 } else {
632 qp_attr->qp_access_flags = 0;
633 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
634 }
635 return 0;
636}
637
638int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
639 int *qp_attr_mask)
640{
641 struct rdma_id_private *id_priv;
642 int ret = 0;
643
644 id_priv = container_of(id, struct rdma_id_private, id);
645 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
646 case RDMA_TRANSPORT_IB:
647 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
648 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
649 else
650 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
651 qp_attr_mask);
652 if (qp_attr->qp_state == IB_QPS_RTR)
653 qp_attr->rq_psn = id_priv->seq_num;
654 break;
655 case RDMA_TRANSPORT_IWARP:
656 if (!id_priv->cm_id.iw) {
657 qp_attr->qp_access_flags = 0;
658 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
659 } else
660 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
661 qp_attr_mask);
662 break;
663 default:
664 ret = -ENOSYS;
665 break;
666 }
667
668 return ret;
669}
670EXPORT_SYMBOL(rdma_init_qp_attr);
671
672static inline int cma_zero_addr(struct sockaddr *addr)
673{
674 struct in6_addr *ip6;
675
676 if (addr->sa_family == AF_INET)
677 return ipv4_is_zeronet(
678 ((struct sockaddr_in *)addr)->sin_addr.s_addr);
679 else {
680 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
681 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
682 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
683 }
684}
685
686static inline int cma_loopback_addr(struct sockaddr *addr)
687{
688 if (addr->sa_family == AF_INET)
689 return ipv4_is_loopback(
690 ((struct sockaddr_in *) addr)->sin_addr.s_addr);
691 else
692 return ipv6_addr_loopback(
693 &((struct sockaddr_in6 *) addr)->sin6_addr);
694}
695
696static inline int cma_any_addr(struct sockaddr *addr)
697{
698 return cma_zero_addr(addr) || cma_loopback_addr(addr);
699}
700
701static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
702{
703 if (src->sa_family != dst->sa_family)
704 return -1;
705
706 switch (src->sa_family) {
707 case AF_INET:
708 return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
709 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
710 default:
711 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
712 &((struct sockaddr_in6 *) dst)->sin6_addr);
713 }
714}
715
716static inline __be16 cma_port(struct sockaddr *addr)
717{
718 if (addr->sa_family == AF_INET)
719 return ((struct sockaddr_in *) addr)->sin_port;
720 else
721 return ((struct sockaddr_in6 *) addr)->sin6_port;
722}
723
724static inline int cma_any_port(struct sockaddr *addr)
725{
726 return !cma_port(addr);
727}
728
729static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
730 u8 *ip_ver, __be16 *port,
731 union cma_ip_addr **src, union cma_ip_addr **dst)
732{
733 switch (ps) {
734 case RDMA_PS_SDP:
735 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
736 SDP_MAJ_VERSION)
737 return -EINVAL;
738
739 *ip_ver = sdp_get_ip_ver(hdr);
740 *port = ((struct sdp_hh *) hdr)->port;
741 *src = &((struct sdp_hh *) hdr)->src_addr;
742 *dst = &((struct sdp_hh *) hdr)->dst_addr;
743 break;
744 default:
745 if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
746 return -EINVAL;
747
748 *ip_ver = cma_get_ip_ver(hdr);
749 *port = ((struct cma_hdr *) hdr)->port;
750 *src = &((struct cma_hdr *) hdr)->src_addr;
751 *dst = &((struct cma_hdr *) hdr)->dst_addr;
752 break;
753 }
754
755 if (*ip_ver != 4 && *ip_ver != 6)
756 return -EINVAL;
757 return 0;
758}
759
760static void cma_save_net_info(struct rdma_addr *addr,
761 struct rdma_addr *listen_addr,
762 u8 ip_ver, __be16 port,
763 union cma_ip_addr *src, union cma_ip_addr *dst)
764{
765 struct sockaddr_in *listen4, *ip4;
766 struct sockaddr_in6 *listen6, *ip6;
767
768 switch (ip_ver) {
769 case 4:
770 listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
771 ip4 = (struct sockaddr_in *) &addr->src_addr;
772 ip4->sin_family = listen4->sin_family;
773 ip4->sin_addr.s_addr = dst->ip4.addr;
774 ip4->sin_port = listen4->sin_port;
775
776 ip4 = (struct sockaddr_in *) &addr->dst_addr;
777 ip4->sin_family = listen4->sin_family;
778 ip4->sin_addr.s_addr = src->ip4.addr;
779 ip4->sin_port = port;
780 break;
781 case 6:
782 listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
783 ip6 = (struct sockaddr_in6 *) &addr->src_addr;
784 ip6->sin6_family = listen6->sin6_family;
785 ip6->sin6_addr = dst->ip6;
786 ip6->sin6_port = listen6->sin6_port;
787
788 ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
789 ip6->sin6_family = listen6->sin6_family;
790 ip6->sin6_addr = src->ip6;
791 ip6->sin6_port = port;
792 break;
793 default:
794 break;
795 }
796}
797
798static inline int cma_user_data_offset(enum rdma_port_space ps)
799{
800 switch (ps) {
801 case RDMA_PS_SDP:
802 return 0;
803 default:
804 return sizeof(struct cma_hdr);
805 }
806}
807
808static void cma_cancel_route(struct rdma_id_private *id_priv)
809{
810 switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
811 case IB_LINK_LAYER_INFINIBAND:
812 if (id_priv->query)
813 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
814 break;
815 default:
816 break;
817 }
818}
819
820static void cma_cancel_listens(struct rdma_id_private *id_priv)
821{
822 struct rdma_id_private *dev_id_priv;
823
824 /*
825 * Remove from listen_any_list to prevent added devices from spawning
826 * additional listen requests.
827 */
828 mutex_lock(&lock);
829 list_del(&id_priv->list);
830
831 while (!list_empty(&id_priv->listen_list)) {
832 dev_id_priv = list_entry(id_priv->listen_list.next,
833 struct rdma_id_private, listen_list);
834 /* sync with device removal to avoid duplicate destruction */
835 list_del_init(&dev_id_priv->list);
836 list_del(&dev_id_priv->listen_list);
837 mutex_unlock(&lock);
838
839 rdma_destroy_id(&dev_id_priv->id);
840 mutex_lock(&lock);
841 }
842 mutex_unlock(&lock);
843}
844
845static void cma_cancel_operation(struct rdma_id_private *id_priv,
846 enum rdma_cm_state state)
847{
848 switch (state) {
849 case RDMA_CM_ADDR_QUERY:
850 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
851 break;
852 case RDMA_CM_ROUTE_QUERY:
853 cma_cancel_route(id_priv);
854 break;
855 case RDMA_CM_LISTEN:
856 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
857 && !id_priv->cma_dev)
858 cma_cancel_listens(id_priv);
859 break;
860 default:
861 break;
862 }
863}
864
865static void cma_release_port(struct rdma_id_private *id_priv)
866{
867 struct rdma_bind_list *bind_list = id_priv->bind_list;
868
869 if (!bind_list)
870 return;
871
872 mutex_lock(&lock);
873 hlist_del(&id_priv->node);
874 if (hlist_empty(&bind_list->owners)) {
875 idr_remove(bind_list->ps, bind_list->port);
876 kfree(bind_list);
877 }
878 mutex_unlock(&lock);
879}
880
881static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
882{
883 struct cma_multicast *mc;
884
885 while (!list_empty(&id_priv->mc_list)) {
886 mc = container_of(id_priv->mc_list.next,
887 struct cma_multicast, list);
888 list_del(&mc->list);
889 switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
890 case IB_LINK_LAYER_INFINIBAND:
891 ib_sa_free_multicast(mc->multicast.ib);
892 kfree(mc);
893 break;
894 case IB_LINK_LAYER_ETHERNET:
895 kref_put(&mc->mcref, release_mc);
896 break;
897 default:
898 break;
899 }
900 }
901}
902
903void rdma_destroy_id(struct rdma_cm_id *id)
904{
905 struct rdma_id_private *id_priv;
906 enum rdma_cm_state state;
907
908 id_priv = container_of(id, struct rdma_id_private, id);
909 state = cma_exch(id_priv, RDMA_CM_DESTROYING);
910 cma_cancel_operation(id_priv, state);
911
912 /*
913 * Wait for any active callback to finish. New callbacks will find
914 * the id_priv state set to destroying and abort.
915 */
916 mutex_lock(&id_priv->handler_mutex);
917 mutex_unlock(&id_priv->handler_mutex);
918
919 if (id_priv->cma_dev) {
920 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
921 case RDMA_TRANSPORT_IB:
922 if (id_priv->cm_id.ib)
923 ib_destroy_cm_id(id_priv->cm_id.ib);
924 break;
925 case RDMA_TRANSPORT_IWARP:
926 if (id_priv->cm_id.iw)
927 iw_destroy_cm_id(id_priv->cm_id.iw);
928 break;
929 default:
930 break;
931 }
932 cma_leave_mc_groups(id_priv);
933 cma_release_dev(id_priv);
934 }
935
936 cma_release_port(id_priv);
937 cma_deref_id(id_priv);
938 wait_for_completion(&id_priv->comp);
939
940 if (id_priv->internal_id)
941 cma_deref_id(id_priv->id.context);
942
943 kfree(id_priv->id.route.path_rec);
944 kfree(id_priv);
945}
946EXPORT_SYMBOL(rdma_destroy_id);
947
948static int cma_rep_recv(struct rdma_id_private *id_priv)
949{
950 int ret;
951
952 ret = cma_modify_qp_rtr(id_priv, NULL);
953 if (ret)
954 goto reject;
955
956 ret = cma_modify_qp_rts(id_priv, NULL);
957 if (ret)
958 goto reject;
959
960 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
961 if (ret)
962 goto reject;
963
964 return 0;
965reject:
966 cma_modify_qp_err(id_priv);
967 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
968 NULL, 0, NULL, 0);
969 return ret;
970}
971
972static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
973{
974 if (id_priv->id.ps == RDMA_PS_SDP &&
975 sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
976 SDP_MAJ_VERSION)
977 return -EINVAL;
978
979 return 0;
980}
981
982static void cma_set_rep_event_data(struct rdma_cm_event *event,
983 struct ib_cm_rep_event_param *rep_data,
984 void *private_data)
985{
986 event->param.conn.private_data = private_data;
987 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
988 event->param.conn.responder_resources = rep_data->responder_resources;
989 event->param.conn.initiator_depth = rep_data->initiator_depth;
990 event->param.conn.flow_control = rep_data->flow_control;
991 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
992 event->param.conn.srq = rep_data->srq;
993 event->param.conn.qp_num = rep_data->remote_qpn;
994}
995
996static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
997{
998 struct rdma_id_private *id_priv = cm_id->context;
999 struct rdma_cm_event event;
1000 int ret = 0;
1001
1002 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
1003 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
1004 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
1005 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
1006 return 0;
1007
1008 memset(&event, 0, sizeof event);
1009 switch (ib_event->event) {
1010 case IB_CM_REQ_ERROR:
1011 case IB_CM_REP_ERROR:
1012 event.event = RDMA_CM_EVENT_UNREACHABLE;
1013 event.status = -ETIMEDOUT;
1014 break;
1015 case IB_CM_REP_RECEIVED:
1016 event.status = cma_verify_rep(id_priv, ib_event->private_data);
1017 if (event.status)
1018 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1019 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
1020 event.status = cma_rep_recv(id_priv);
1021 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
1022 RDMA_CM_EVENT_ESTABLISHED;
1023 } else
1024 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
1025 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
1026 ib_event->private_data);
1027 break;
1028 case IB_CM_RTU_RECEIVED:
1029 case IB_CM_USER_ESTABLISHED:
1030 event.event = RDMA_CM_EVENT_ESTABLISHED;
1031 break;
1032 case IB_CM_DREQ_ERROR:
1033 event.status = -ETIMEDOUT; /* fall through */
1034 case IB_CM_DREQ_RECEIVED:
1035 case IB_CM_DREP_RECEIVED:
1036 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
1037 RDMA_CM_DISCONNECT))
1038 goto out;
1039 event.event = RDMA_CM_EVENT_DISCONNECTED;
1040 break;
1041 case IB_CM_TIMEWAIT_EXIT:
1042 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
1043 break;
1044 case IB_CM_MRA_RECEIVED:
1045 /* ignore event */
1046 goto out;
1047 case IB_CM_REJ_RECEIVED:
1048 cma_modify_qp_err(id_priv);
1049 event.status = ib_event->param.rej_rcvd.reason;
1050 event.event = RDMA_CM_EVENT_REJECTED;
1051 event.param.conn.private_data = ib_event->private_data;
1052 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
1053 break;
1054 default:
1055 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
1056 ib_event->event);
1057 goto out;
1058 }
1059
1060 ret = id_priv->id.event_handler(&id_priv->id, &event);
1061 if (ret) {
1062 /* Destroy the CM ID by returning a non-zero value. */
1063 id_priv->cm_id.ib = NULL;
1064 cma_exch(id_priv, RDMA_CM_DESTROYING);
1065 mutex_unlock(&id_priv->handler_mutex);
1066 rdma_destroy_id(&id_priv->id);
1067 return ret;
1068 }
1069out:
1070 mutex_unlock(&id_priv->handler_mutex);
1071 return ret;
1072}
1073
1074static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1075 struct ib_cm_event *ib_event)
1076{
1077 struct rdma_id_private *id_priv;
1078 struct rdma_cm_id *id;
1079 struct rdma_route *rt;
1080 union cma_ip_addr *src, *dst;
1081 __be16 port;
1082 u8 ip_ver;
1083 int ret;
1084
1085 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1086 &ip_ver, &port, &src, &dst))
1087 return NULL;
1088
1089 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1090 listen_id->ps, ib_event->param.req_rcvd.qp_type);
1091 if (IS_ERR(id))
1092 return NULL;
1093
1094 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1095 ip_ver, port, src, dst);
1096
1097 rt = &id->route;
1098 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
1099 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
1100 GFP_KERNEL);
1101 if (!rt->path_rec)
1102 goto err;
1103
1104 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
1105 if (rt->num_paths == 2)
1106 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1107
1108 if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
1109 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
1110 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
1111 ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
1112 } else {
1113 ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
1114 &rt->addr.dev_addr);
1115 if (ret)
1116 goto err;
1117 }
1118 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1119
1120 id_priv = container_of(id, struct rdma_id_private, id);
1121 id_priv->state = RDMA_CM_CONNECT;
1122 return id_priv;
1123
1124err:
1125 rdma_destroy_id(id);
1126 return NULL;
1127}
1128
1129static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1130 struct ib_cm_event *ib_event)
1131{
1132 struct rdma_id_private *id_priv;
1133 struct rdma_cm_id *id;
1134 union cma_ip_addr *src, *dst;
1135 __be16 port;
1136 u8 ip_ver;
1137 int ret;
1138
1139 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1140 listen_id->ps, IB_QPT_UD);
1141 if (IS_ERR(id))
1142 return NULL;
1143
1144
1145 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1146 &ip_ver, &port, &src, &dst))
1147 goto err;
1148
1149 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1150 ip_ver, port, src, dst);
1151
1152 if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
1153 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1154 &id->route.addr.dev_addr);
1155 if (ret)
1156 goto err;
1157 }
1158
1159 id_priv = container_of(id, struct rdma_id_private, id);
1160 id_priv->state = RDMA_CM_CONNECT;
1161 return id_priv;
1162err:
1163 rdma_destroy_id(id);
1164 return NULL;
1165}
1166
1167static void cma_set_req_event_data(struct rdma_cm_event *event,
1168 struct ib_cm_req_event_param *req_data,
1169 void *private_data, int offset)
1170{
1171 event->param.conn.private_data = private_data + offset;
1172 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1173 event->param.conn.responder_resources = req_data->responder_resources;
1174 event->param.conn.initiator_depth = req_data->initiator_depth;
1175 event->param.conn.flow_control = req_data->flow_control;
1176 event->param.conn.retry_count = req_data->retry_count;
1177 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1178 event->param.conn.srq = req_data->srq;
1179 event->param.conn.qp_num = req_data->remote_qpn;
1180}
1181
1182static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1183{
1184 struct rdma_id_private *listen_id, *conn_id;
1185 struct rdma_cm_event event;
1186 int offset, ret;
1187
1188 listen_id = cm_id->context;
1189 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
1190 return -ECONNABORTED;
1191
1192 memset(&event, 0, sizeof event);
1193 offset = cma_user_data_offset(listen_id->id.ps);
1194 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1195 if (listen_id->id.qp_type == IB_QPT_UD) {
1196 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1197 event.param.ud.private_data = ib_event->private_data + offset;
1198 event.param.ud.private_data_len =
1199 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1200 } else {
1201 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1202 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1203 ib_event->private_data, offset);
1204 }
1205 if (!conn_id) {
1206 ret = -ENOMEM;
1207 goto out;
1208 }
1209
1210 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1211 ret = cma_acquire_dev(conn_id);
1212 if (ret)
1213 goto release_conn_id;
1214
1215 conn_id->cm_id.ib = cm_id;
1216 cm_id->context = conn_id;
1217 cm_id->cm_handler = cma_ib_handler;
1218
1219 /*
1220 * Protect against the user destroying conn_id from another thread
1221 * until we're done accessing it.
1222 */
1223 atomic_inc(&conn_id->refcount);
1224 ret = conn_id->id.event_handler(&conn_id->id, &event);
1225 if (!ret) {
1226 /*
1227 * Acquire mutex to prevent user executing rdma_destroy_id()
1228 * while we're accessing the cm_id.
1229 */
1230 mutex_lock(&lock);
1231 if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
1232 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1233 mutex_unlock(&lock);
1234 mutex_unlock(&conn_id->handler_mutex);
1235 cma_deref_id(conn_id);
1236 goto out;
1237 }
1238 cma_deref_id(conn_id);
1239
1240 /* Destroy the CM ID by returning a non-zero value. */
1241 conn_id->cm_id.ib = NULL;
1242
1243release_conn_id:
1244 cma_exch(conn_id, RDMA_CM_DESTROYING);
1245 mutex_unlock(&conn_id->handler_mutex);
1246 rdma_destroy_id(&conn_id->id);
1247
1248out:
1249 mutex_unlock(&listen_id->handler_mutex);
1250 return ret;
1251}
1252
1253static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
1254{
1255 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
1256}
1257
1258static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1259 struct ib_cm_compare_data *compare)
1260{
1261 struct cma_hdr *cma_data, *cma_mask;
1262 struct sdp_hh *sdp_data, *sdp_mask;
1263 __be32 ip4_addr;
1264 struct in6_addr ip6_addr;
1265
1266 memset(compare, 0, sizeof *compare);
1267 cma_data = (void *) compare->data;
1268 cma_mask = (void *) compare->mask;
1269 sdp_data = (void *) compare->data;
1270 sdp_mask = (void *) compare->mask;
1271
1272 switch (addr->sa_family) {
1273 case AF_INET:
1274 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
1275 if (ps == RDMA_PS_SDP) {
1276 sdp_set_ip_ver(sdp_data, 4);
1277 sdp_set_ip_ver(sdp_mask, 0xF);
1278 sdp_data->dst_addr.ip4.addr = ip4_addr;
1279 sdp_mask->dst_addr.ip4.addr = htonl(~0);
1280 } else {
1281 cma_set_ip_ver(cma_data, 4);
1282 cma_set_ip_ver(cma_mask, 0xF);
1283 cma_data->dst_addr.ip4.addr = ip4_addr;
1284 cma_mask->dst_addr.ip4.addr = htonl(~0);
1285 }
1286 break;
1287 case AF_INET6:
1288 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
1289 if (ps == RDMA_PS_SDP) {
1290 sdp_set_ip_ver(sdp_data, 6);
1291 sdp_set_ip_ver(sdp_mask, 0xF);
1292 sdp_data->dst_addr.ip6 = ip6_addr;
1293 memset(&sdp_mask->dst_addr.ip6, 0xFF,
1294 sizeof sdp_mask->dst_addr.ip6);
1295 } else {
1296 cma_set_ip_ver(cma_data, 6);
1297 cma_set_ip_ver(cma_mask, 0xF);
1298 cma_data->dst_addr.ip6 = ip6_addr;
1299 memset(&cma_mask->dst_addr.ip6, 0xFF,
1300 sizeof cma_mask->dst_addr.ip6);
1301 }
1302 break;
1303 default:
1304 break;
1305 }
1306}
1307
1308static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1309{
1310 struct rdma_id_private *id_priv = iw_id->context;
1311 struct rdma_cm_event event;
1312 struct sockaddr_in *sin;
1313 int ret = 0;
1314
1315 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
1316 return 0;
1317
1318 memset(&event, 0, sizeof event);
1319 switch (iw_event->event) {
1320 case IW_CM_EVENT_CLOSE:
1321 event.event = RDMA_CM_EVENT_DISCONNECTED;
1322 break;
1323 case IW_CM_EVENT_CONNECT_REPLY:
1324 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1325 *sin = iw_event->local_addr;
1326 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1327 *sin = iw_event->remote_addr;
1328 switch (iw_event->status) {
1329 case 0:
1330 event.event = RDMA_CM_EVENT_ESTABLISHED;
1331 break;
1332 case -ECONNRESET:
1333 case -ECONNREFUSED:
1334 event.event = RDMA_CM_EVENT_REJECTED;
1335 break;
1336 case -ETIMEDOUT:
1337 event.event = RDMA_CM_EVENT_UNREACHABLE;
1338 break;
1339 default:
1340 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1341 break;
1342 }
1343 break;
1344 case IW_CM_EVENT_ESTABLISHED:
1345 event.event = RDMA_CM_EVENT_ESTABLISHED;
1346 break;
1347 default:
1348 BUG_ON(1);
1349 }
1350
1351 event.status = iw_event->status;
1352 event.param.conn.private_data = iw_event->private_data;
1353 event.param.conn.private_data_len = iw_event->private_data_len;
1354 ret = id_priv->id.event_handler(&id_priv->id, &event);
1355 if (ret) {
1356 /* Destroy the CM ID by returning a non-zero value. */
1357 id_priv->cm_id.iw = NULL;
1358 cma_exch(id_priv, RDMA_CM_DESTROYING);
1359 mutex_unlock(&id_priv->handler_mutex);
1360 rdma_destroy_id(&id_priv->id);
1361 return ret;
1362 }
1363
1364 mutex_unlock(&id_priv->handler_mutex);
1365 return ret;
1366}
1367
1368static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1369 struct iw_cm_event *iw_event)
1370{
1371 struct rdma_cm_id *new_cm_id;
1372 struct rdma_id_private *listen_id, *conn_id;
1373 struct sockaddr_in *sin;
1374 struct net_device *dev = NULL;
1375 struct rdma_cm_event event;
1376 int ret;
1377 struct ib_device_attr attr;
1378
1379 listen_id = cm_id->context;
1380 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
1381 return -ECONNABORTED;
1382
1383 /* Create a new RDMA id for the new IW CM ID */
1384 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1385 listen_id->id.context,
1386 RDMA_PS_TCP, IB_QPT_RC);
1387 if (IS_ERR(new_cm_id)) {
1388 ret = -ENOMEM;
1389 goto out;
1390 }
1391 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1392 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1393 conn_id->state = RDMA_CM_CONNECT;
1394
1395 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
1396 if (!dev) {
1397 ret = -EADDRNOTAVAIL;
1398 mutex_unlock(&conn_id->handler_mutex);
1399 rdma_destroy_id(new_cm_id);
1400 goto out;
1401 }
1402 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1403 if (ret) {
1404 mutex_unlock(&conn_id->handler_mutex);
1405 rdma_destroy_id(new_cm_id);
1406 goto out;
1407 }
1408
1409 ret = cma_acquire_dev(conn_id);
1410 if (ret) {
1411 mutex_unlock(&conn_id->handler_mutex);
1412 rdma_destroy_id(new_cm_id);
1413 goto out;
1414 }
1415
1416 conn_id->cm_id.iw = cm_id;
1417 cm_id->context = conn_id;
1418 cm_id->cm_handler = cma_iw_handler;
1419
1420 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
1421 *sin = iw_event->local_addr;
1422 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1423 *sin = iw_event->remote_addr;
1424
1425 ret = ib_query_device(conn_id->id.device, &attr);
1426 if (ret) {
1427 mutex_unlock(&conn_id->handler_mutex);
1428 rdma_destroy_id(new_cm_id);
1429 goto out;
1430 }
1431
1432 memset(&event, 0, sizeof event);
1433 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1434 event.param.conn.private_data = iw_event->private_data;
1435 event.param.conn.private_data_len = iw_event->private_data_len;
1436 event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
1437 event.param.conn.responder_resources = attr.max_qp_rd_atom;
1438
1439 /*
1440 * Protect against the user destroying conn_id from another thread
1441 * until we're done accessing it.
1442 */
1443 atomic_inc(&conn_id->refcount);
1444 ret = conn_id->id.event_handler(&conn_id->id, &event);
1445 if (ret) {
1446 /* User wants to destroy the CM ID */
1447 conn_id->cm_id.iw = NULL;
1448 cma_exch(conn_id, RDMA_CM_DESTROYING);
1449 mutex_unlock(&conn_id->handler_mutex);
1450 cma_deref_id(conn_id);
1451 rdma_destroy_id(&conn_id->id);
1452 goto out;
1453 }
1454
1455 mutex_unlock(&conn_id->handler_mutex);
1456 cma_deref_id(conn_id);
1457
1458out:
1459 if (dev)
1460 dev_put(dev);
1461 mutex_unlock(&listen_id->handler_mutex);
1462 return ret;
1463}
1464
1465static int cma_ib_listen(struct rdma_id_private *id_priv)
1466{
1467 struct ib_cm_compare_data compare_data;
1468 struct sockaddr *addr;
1469 struct ib_cm_id *id;
1470 __be64 svc_id;
1471 int ret;
1472
1473 id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
1474 if (IS_ERR(id))
1475 return PTR_ERR(id);
1476
1477 id_priv->cm_id.ib = id;
1478
1479 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1480 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1481 if (cma_any_addr(addr))
1482 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
1483 else {
1484 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1485 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
1486 }
1487
1488 if (ret) {
1489 ib_destroy_cm_id(id_priv->cm_id.ib);
1490 id_priv->cm_id.ib = NULL;
1491 }
1492
1493 return ret;
1494}
1495
1496static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1497{
1498 int ret;
1499 struct sockaddr_in *sin;
1500 struct iw_cm_id *id;
1501
1502 id = iw_create_cm_id(id_priv->id.device,
1503 iw_conn_req_handler,
1504 id_priv);
1505 if (IS_ERR(id))
1506 return PTR_ERR(id);
1507
1508 id_priv->cm_id.iw = id;
1509
1510 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1511 id_priv->cm_id.iw->local_addr = *sin;
1512
1513 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1514
1515 if (ret) {
1516 iw_destroy_cm_id(id_priv->cm_id.iw);
1517 id_priv->cm_id.iw = NULL;
1518 }
1519
1520 return ret;
1521}
1522
1523static int cma_listen_handler(struct rdma_cm_id *id,
1524 struct rdma_cm_event *event)
1525{
1526 struct rdma_id_private *id_priv = id->context;
1527
1528 id->context = id_priv->id.context;
1529 id->event_handler = id_priv->id.event_handler;
1530 return id_priv->id.event_handler(id, event);
1531}
1532
1533static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1534 struct cma_device *cma_dev)
1535{
1536 struct rdma_id_private *dev_id_priv;
1537 struct rdma_cm_id *id;
1538 int ret;
1539
1540 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
1541 id_priv->id.qp_type);
1542 if (IS_ERR(id))
1543 return;
1544
1545 dev_id_priv = container_of(id, struct rdma_id_private, id);
1546
1547 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
1548 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1549 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
1550
1551 cma_attach_to_dev(dev_id_priv, cma_dev);
1552 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
1553 atomic_inc(&id_priv->refcount);
1554 dev_id_priv->internal_id = 1;
1555
1556 ret = rdma_listen(id, id_priv->backlog);
1557 if (ret)
1558 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
1559 "listening on device %s\n", ret, cma_dev->device->name);
1560}
1561
1562static void cma_listen_on_all(struct rdma_id_private *id_priv)
1563{
1564 struct cma_device *cma_dev;
1565
1566 mutex_lock(&lock);
1567 list_add_tail(&id_priv->list, &listen_any_list);
1568 list_for_each_entry(cma_dev, &dev_list, list)
1569 cma_listen_on_dev(id_priv, cma_dev);
1570 mutex_unlock(&lock);
1571}
1572
1573void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1574{
1575 struct rdma_id_private *id_priv;
1576
1577 id_priv = container_of(id, struct rdma_id_private, id);
1578 id_priv->tos = (u8) tos;
1579}
1580EXPORT_SYMBOL(rdma_set_service_type);
1581
1582static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1583 void *context)
1584{
1585 struct cma_work *work = context;
1586 struct rdma_route *route;
1587
1588 route = &work->id->id.route;
1589
1590 if (!status) {
1591 route->num_paths = 1;
1592 *route->path_rec = *path_rec;
1593 } else {
1594 work->old_state = RDMA_CM_ROUTE_QUERY;
1595 work->new_state = RDMA_CM_ADDR_RESOLVED;
1596 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
1597 work->event.status = status;
1598 }
1599
1600 queue_work(cma_wq, &work->work);
1601}
1602
1603static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1604 struct cma_work *work)
1605{
1606 struct rdma_addr *addr = &id_priv->id.route.addr;
1607 struct ib_sa_path_rec path_rec;
1608 ib_sa_comp_mask comp_mask;
1609 struct sockaddr_in6 *sin6;
1610
1611 memset(&path_rec, 0, sizeof path_rec);
1612 rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
1613 rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
1614 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1615 path_rec.numb_path = 1;
1616 path_rec.reversible = 1;
1617 path_rec.service_id = cma_get_service_id(id_priv->id.ps,
1618 (struct sockaddr *) &addr->dst_addr);
1619
1620 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1621 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1622 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
1623
1624 if (addr->src_addr.ss_family == AF_INET) {
1625 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
1626 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
1627 } else {
1628 sin6 = (struct sockaddr_in6 *) &addr->src_addr;
1629 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
1630 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
1631 }
1632
1633 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1634 id_priv->id.port_num, &path_rec,
1635 comp_mask, timeout_ms,
1636 GFP_KERNEL, cma_query_handler,
1637 work, &id_priv->query);
1638
1639 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1640}
1641
1642static void cma_work_handler(struct work_struct *_work)
1643{
1644 struct cma_work *work = container_of(_work, struct cma_work, work);
1645 struct rdma_id_private *id_priv = work->id;
1646 int destroy = 0;
1647
1648 mutex_lock(&id_priv->handler_mutex);
1649 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
1650 goto out;
1651
1652 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1653 cma_exch(id_priv, RDMA_CM_DESTROYING);
1654 destroy = 1;
1655 }
1656out:
1657 mutex_unlock(&id_priv->handler_mutex);
1658 cma_deref_id(id_priv);
1659 if (destroy)
1660 rdma_destroy_id(&id_priv->id);
1661 kfree(work);
1662}
1663
1664static void cma_ndev_work_handler(struct work_struct *_work)
1665{
1666 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
1667 struct rdma_id_private *id_priv = work->id;
1668 int destroy = 0;
1669
1670 mutex_lock(&id_priv->handler_mutex);
1671 if (id_priv->state == RDMA_CM_DESTROYING ||
1672 id_priv->state == RDMA_CM_DEVICE_REMOVAL)
1673 goto out;
1674
1675 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1676 cma_exch(id_priv, RDMA_CM_DESTROYING);
1677 destroy = 1;
1678 }
1679
1680out:
1681 mutex_unlock(&id_priv->handler_mutex);
1682 cma_deref_id(id_priv);
1683 if (destroy)
1684 rdma_destroy_id(&id_priv->id);
1685 kfree(work);
1686}
1687
1688static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1689{
1690 struct rdma_route *route = &id_priv->id.route;
1691 struct cma_work *work;
1692 int ret;
1693
1694 work = kzalloc(sizeof *work, GFP_KERNEL);
1695 if (!work)
1696 return -ENOMEM;
1697
1698 work->id = id_priv;
1699 INIT_WORK(&work->work, cma_work_handler);
1700 work->old_state = RDMA_CM_ROUTE_QUERY;
1701 work->new_state = RDMA_CM_ROUTE_RESOLVED;
1702 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1703
1704 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1705 if (!route->path_rec) {
1706 ret = -ENOMEM;
1707 goto err1;
1708 }
1709
1710 ret = cma_query_ib_route(id_priv, timeout_ms, work);
1711 if (ret)
1712 goto err2;
1713
1714 return 0;
1715err2:
1716 kfree(route->path_rec);
1717 route->path_rec = NULL;
1718err1:
1719 kfree(work);
1720 return ret;
1721}
1722
1723int rdma_set_ib_paths(struct rdma_cm_id *id,
1724 struct ib_sa_path_rec *path_rec, int num_paths)
1725{
1726 struct rdma_id_private *id_priv;
1727 int ret;
1728
1729 id_priv = container_of(id, struct rdma_id_private, id);
1730 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
1731 RDMA_CM_ROUTE_RESOLVED))
1732 return -EINVAL;
1733
1734 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
1735 GFP_KERNEL);
1736 if (!id->route.path_rec) {
1737 ret = -ENOMEM;
1738 goto err;
1739 }
1740
1741 id->route.num_paths = num_paths;
1742 return 0;
1743err:
1744 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
1745 return ret;
1746}
1747EXPORT_SYMBOL(rdma_set_ib_paths);
1748
1749static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1750{
1751 struct cma_work *work;
1752
1753 work = kzalloc(sizeof *work, GFP_KERNEL);
1754 if (!work)
1755 return -ENOMEM;
1756
1757 work->id = id_priv;
1758 INIT_WORK(&work->work, cma_work_handler);
1759 work->old_state = RDMA_CM_ROUTE_QUERY;
1760 work->new_state = RDMA_CM_ROUTE_RESOLVED;
1761 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1762 queue_work(cma_wq, &work->work);
1763 return 0;
1764}
1765
1766static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
1767{
1768 struct rdma_route *route = &id_priv->id.route;
1769 struct rdma_addr *addr = &route->addr;
1770 struct cma_work *work;
1771 int ret;
1772 struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
1773 struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
1774 struct net_device *ndev = NULL;
1775 u16 vid;
1776
1777 if (src_addr->sin_family != dst_addr->sin_family)
1778 return -EINVAL;
1779
1780 work = kzalloc(sizeof *work, GFP_KERNEL);
1781 if (!work)
1782 return -ENOMEM;
1783
1784 work->id = id_priv;
1785 INIT_WORK(&work->work, cma_work_handler);
1786
1787 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
1788 if (!route->path_rec) {
1789 ret = -ENOMEM;
1790 goto err1;
1791 }
1792
1793 route->num_paths = 1;
1794
1795 if (addr->dev_addr.bound_dev_if)
1796 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
1797 if (!ndev) {
1798 ret = -ENODEV;
1799 goto err2;
1800 }
1801
1802 vid = rdma_vlan_dev_vlan_id(ndev);
1803
1804 iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
1805 iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
1806
1807 route->path_rec->hop_limit = 1;
1808 route->path_rec->reversible = 1;
1809 route->path_rec->pkey = cpu_to_be16(0xffff);
1810 route->path_rec->mtu_selector = IB_SA_EQ;
1811 route->path_rec->sl = id_priv->tos >> 5;
1812
1813 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
1814 route->path_rec->rate_selector = IB_SA_EQ;
1815 route->path_rec->rate = iboe_get_rate(ndev);
1816 dev_put(ndev);
1817 route->path_rec->packet_life_time_selector = IB_SA_EQ;
1818 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
1819 if (!route->path_rec->mtu) {
1820 ret = -EINVAL;
1821 goto err2;
1822 }
1823
1824 work->old_state = RDMA_CM_ROUTE_QUERY;
1825 work->new_state = RDMA_CM_ROUTE_RESOLVED;
1826 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1827 work->event.status = 0;
1828
1829 queue_work(cma_wq, &work->work);
1830
1831 return 0;
1832
1833err2:
1834 kfree(route->path_rec);
1835 route->path_rec = NULL;
1836err1:
1837 kfree(work);
1838 return ret;
1839}
1840
1841int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1842{
1843 struct rdma_id_private *id_priv;
1844 int ret;
1845
1846 id_priv = container_of(id, struct rdma_id_private, id);
1847 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
1848 return -EINVAL;
1849
1850 atomic_inc(&id_priv->refcount);
1851 switch (rdma_node_get_transport(id->device->node_type)) {
1852 case RDMA_TRANSPORT_IB:
1853 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
1854 case IB_LINK_LAYER_INFINIBAND:
1855 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1856 break;
1857 case IB_LINK_LAYER_ETHERNET:
1858 ret = cma_resolve_iboe_route(id_priv);
1859 break;
1860 default:
1861 ret = -ENOSYS;
1862 }
1863 break;
1864 case RDMA_TRANSPORT_IWARP:
1865 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1866 break;
1867 default:
1868 ret = -ENOSYS;
1869 break;
1870 }
1871 if (ret)
1872 goto err;
1873
1874 return 0;
1875err:
1876 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
1877 cma_deref_id(id_priv);
1878 return ret;
1879}
1880EXPORT_SYMBOL(rdma_resolve_route);
1881
1882static int cma_bind_loopback(struct rdma_id_private *id_priv)
1883{
1884 struct cma_device *cma_dev;
1885 struct ib_port_attr port_attr;
1886 union ib_gid gid;
1887 u16 pkey;
1888 int ret;
1889 u8 p;
1890
1891 mutex_lock(&lock);
1892 if (list_empty(&dev_list)) {
1893 ret = -ENODEV;
1894 goto out;
1895 }
1896 list_for_each_entry(cma_dev, &dev_list, list)
1897 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1898 if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1899 port_attr.state == IB_PORT_ACTIVE)
1900 goto port_found;
1901
1902 p = 1;
1903 cma_dev = list_entry(dev_list.next, struct cma_device, list);
1904
1905port_found:
1906 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1907 if (ret)
1908 goto out;
1909
1910 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1911 if (ret)
1912 goto out;
1913
1914 id_priv->id.route.addr.dev_addr.dev_type =
1915 (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
1916 ARPHRD_INFINIBAND : ARPHRD_ETHER;
1917
1918 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1919 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1920 id_priv->id.port_num = p;
1921 cma_attach_to_dev(id_priv, cma_dev);
1922out:
1923 mutex_unlock(&lock);
1924 return ret;
1925}
1926
1927static void addr_handler(int status, struct sockaddr *src_addr,
1928 struct rdma_dev_addr *dev_addr, void *context)
1929{
1930 struct rdma_id_private *id_priv = context;
1931 struct rdma_cm_event event;
1932
1933 memset(&event, 0, sizeof event);
1934 mutex_lock(&id_priv->handler_mutex);
1935 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
1936 RDMA_CM_ADDR_RESOLVED))
1937 goto out;
1938
1939 if (!status && !id_priv->cma_dev)
1940 status = cma_acquire_dev(id_priv);
1941
1942 if (status) {
1943 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
1944 RDMA_CM_ADDR_BOUND))
1945 goto out;
1946 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1947 event.status = status;
1948 } else {
1949 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1950 ip_addr_size(src_addr));
1951 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1952 }
1953
1954 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1955 cma_exch(id_priv, RDMA_CM_DESTROYING);
1956 mutex_unlock(&id_priv->handler_mutex);
1957 cma_deref_id(id_priv);
1958 rdma_destroy_id(&id_priv->id);
1959 return;
1960 }
1961out:
1962 mutex_unlock(&id_priv->handler_mutex);
1963 cma_deref_id(id_priv);
1964}
1965
1966static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1967{
1968 struct cma_work *work;
1969 struct sockaddr *src, *dst;
1970 union ib_gid gid;
1971 int ret;
1972
1973 work = kzalloc(sizeof *work, GFP_KERNEL);
1974 if (!work)
1975 return -ENOMEM;
1976
1977 if (!id_priv->cma_dev) {
1978 ret = cma_bind_loopback(id_priv);
1979 if (ret)
1980 goto err;
1981 }
1982
1983 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1984 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1985
1986 src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1987 if (cma_zero_addr(src)) {
1988 dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
1989 if ((src->sa_family = dst->sa_family) == AF_INET) {
1990 ((struct sockaddr_in *) src)->sin_addr.s_addr =
1991 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
1992 } else {
1993 ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
1994 &((struct sockaddr_in6 *) dst)->sin6_addr);
1995 }
1996 }
1997
1998 work->id = id_priv;
1999 INIT_WORK(&work->work, cma_work_handler);
2000 work->old_state = RDMA_CM_ADDR_QUERY;
2001 work->new_state = RDMA_CM_ADDR_RESOLVED;
2002 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2003 queue_work(cma_wq, &work->work);
2004 return 0;
2005err:
2006 kfree(work);
2007 return ret;
2008}
2009
2010static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2011 struct sockaddr *dst_addr)
2012{
2013 if (!src_addr || !src_addr->sa_family) {
2014 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2015 if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
2016 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
2017 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
2018 }
2019 }
2020 return rdma_bind_addr(id, src_addr);
2021}
2022
2023int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2024 struct sockaddr *dst_addr, int timeout_ms)
2025{
2026 struct rdma_id_private *id_priv;
2027 int ret;
2028
2029 id_priv = container_of(id, struct rdma_id_private, id);
2030 if (id_priv->state == RDMA_CM_IDLE) {
2031 ret = cma_bind_addr(id, src_addr, dst_addr);
2032 if (ret)
2033 return ret;
2034 }
2035
2036 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
2037 return -EINVAL;
2038
2039 atomic_inc(&id_priv->refcount);
2040 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
2041 if (cma_any_addr(dst_addr))
2042 ret = cma_resolve_loopback(id_priv);
2043 else
2044 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
2045 dst_addr, &id->route.addr.dev_addr,
2046 timeout_ms, addr_handler, id_priv);
2047 if (ret)
2048 goto err;
2049
2050 return 0;
2051err:
2052 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
2053 cma_deref_id(id_priv);
2054 return ret;
2055}
2056EXPORT_SYMBOL(rdma_resolve_addr);
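/*
 * Illustrative sketch, not part of this driver: an active-side consumer
 * typically starts by resolving the destination address and then drives the
 * rest of the setup from its event handler.  "id" is assumed to have been
 * created with rdma_create_id(); "peer_ip" and "port" are placeholders.
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(port),
 *		.sin_addr   = { .s_addr = peer_ip },
 *	};
 *
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
 *
 * Success is reported asynchronously as RDMA_CM_EVENT_ADDR_RESOLVED, or
 * RDMA_CM_EVENT_ADDR_ERROR on failure.
 */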
2057
2058int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2059{
2060 struct rdma_id_private *id_priv;
2061 unsigned long flags;
2062 int ret;
2063
2064 id_priv = container_of(id, struct rdma_id_private, id);
2065 spin_lock_irqsave(&id_priv->lock, flags);
2066 if (id_priv->state == RDMA_CM_IDLE) {
2067 id_priv->reuseaddr = reuse;
2068 ret = 0;
2069 } else {
2070 ret = -EINVAL;
2071 }
2072 spin_unlock_irqrestore(&id_priv->lock, flags);
2073 return ret;
2074}
2075EXPORT_SYMBOL(rdma_set_reuseaddr);
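/*
 * Illustrative sketch, not part of this driver: like SO_REUSEADDR on a
 * socket, reuseaddr must be set while the id is still idle, i.e. before
 * rdma_bind_addr()/rdma_listen().  "src" is a caller-owned sockaddr.
 *
 *	ret = rdma_set_reuseaddr(id, 1);
 *	if (!ret)
 *		ret = rdma_bind_addr(id, (struct sockaddr *) &src);
 */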
2076
2077static void cma_bind_port(struct rdma_bind_list *bind_list,
2078 struct rdma_id_private *id_priv)
2079{
2080 struct sockaddr_in *sin;
2081
2082 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2083 sin->sin_port = htons(bind_list->port);
2084 id_priv->bind_list = bind_list;
2085 hlist_add_head(&id_priv->node, &bind_list->owners);
2086}
2087
2088static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
2089 unsigned short snum)
2090{
2091 struct rdma_bind_list *bind_list;
2092 int port, ret;
2093
2094 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2095 if (!bind_list)
2096 return -ENOMEM;
2097
2098 do {
2099 ret = idr_get_new_above(ps, bind_list, snum, &port);
2100 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
2101
2102 if (ret)
2103 goto err1;
2104
2105 if (port != snum) {
2106 ret = -EADDRNOTAVAIL;
2107 goto err2;
2108 }
2109
2110 bind_list->ps = ps;
2111 bind_list->port = (unsigned short) port;
2112 cma_bind_port(bind_list, id_priv);
2113 return 0;
2114err2:
2115 idr_remove(ps, port);
2116err1:
2117 kfree(bind_list);
2118 return ret;
2119}
2120
2121static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
2122{
2123 static unsigned int last_used_port;
2124 int low, high, remaining;
2125 unsigned int rover;
2126
2127 inet_get_local_port_range(&low, &high);
2128 remaining = (high - low) + 1;
2129 rover = net_random() % remaining + low;
2130retry:
2131 if (last_used_port != rover &&
2132 !idr_find(ps, (unsigned short) rover)) {
2133 int ret = cma_alloc_port(ps, id_priv, rover);
 2134 /*
 2135 * Remember the previously used port number in order to avoid
 2136 * re-using the same port immediately after it is closed.
 2137 */
2138 if (!ret)
2139 last_used_port = rover;
2140 if (ret != -EADDRNOTAVAIL)
2141 return ret;
2142 }
2143 if (--remaining) {
2144 rover++;
2145 if ((rover < low) || (rover > high))
2146 rover = low;
2147 goto retry;
2148 }
2149 return -EADDRNOTAVAIL;
2150}
2151
2152/*
2153 * Check that the requested port is available. This is called when trying to
2154 * bind to a specific port, or when trying to listen on a bound port. In
2155 * the latter case, the provided id_priv may already be on the bind_list, but
2156 * we still need to check that it's okay to start listening.
2157 */
2158static int cma_check_port(struct rdma_bind_list *bind_list,
2159 struct rdma_id_private *id_priv, uint8_t reuseaddr)
2160{
2161 struct rdma_id_private *cur_id;
2162 struct sockaddr *addr, *cur_addr;
2163 struct hlist_node *node;
2164
2165 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
2166 if (cma_any_addr(addr) && !reuseaddr)
2167 return -EADDRNOTAVAIL;
2168
2169 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2170 if (id_priv == cur_id)
2171 continue;
2172
2173 if ((cur_id->state == RDMA_CM_LISTEN) ||
2174 !reuseaddr || !cur_id->reuseaddr) {
2175 cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
2176 if (cma_any_addr(cur_addr))
2177 return -EADDRNOTAVAIL;
2178
2179 if (!cma_addr_cmp(addr, cur_addr))
2180 return -EADDRINUSE;
2181 }
2182 }
2183 return 0;
2184}
2185
2186static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
2187{
2188 struct rdma_bind_list *bind_list;
2189 unsigned short snum;
2190 int ret;
2191
2192 snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
2193 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2194 return -EACCES;
2195
2196 bind_list = idr_find(ps, snum);
2197 if (!bind_list) {
2198 ret = cma_alloc_port(ps, id_priv, snum);
2199 } else {
2200 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
2201 if (!ret)
2202 cma_bind_port(bind_list, id_priv);
2203 }
2204 return ret;
2205}
2206
2207static int cma_bind_listen(struct rdma_id_private *id_priv)
2208{
2209 struct rdma_bind_list *bind_list = id_priv->bind_list;
2210 int ret = 0;
2211
2212 mutex_lock(&lock);
2213 if (bind_list->owners.first->next)
2214 ret = cma_check_port(bind_list, id_priv, 0);
2215 mutex_unlock(&lock);
2216 return ret;
2217}
2218
2219static int cma_get_port(struct rdma_id_private *id_priv)
2220{
2221 struct idr *ps;
2222 int ret;
2223
2224 switch (id_priv->id.ps) {
2225 case RDMA_PS_SDP:
2226 ps = &sdp_ps;
2227 break;
2228 case RDMA_PS_TCP:
2229 ps = &tcp_ps;
2230 break;
2231 case RDMA_PS_UDP:
2232 ps = &udp_ps;
2233 break;
2234 case RDMA_PS_IPOIB:
2235 ps = &ipoib_ps;
2236 break;
2237 default:
2238 return -EPROTONOSUPPORT;
2239 }
2240
2241 mutex_lock(&lock);
2242 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2243 ret = cma_alloc_any_port(ps, id_priv);
2244 else
2245 ret = cma_use_port(ps, id_priv);
2246 mutex_unlock(&lock);
2247
2248 return ret;
2249}
2250
2251static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2252 struct sockaddr *addr)
2253{
2254#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2255 struct sockaddr_in6 *sin6;
2256
2257 if (addr->sa_family != AF_INET6)
2258 return 0;
2259
2260 sin6 = (struct sockaddr_in6 *) addr;
2261 if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
2262 !sin6->sin6_scope_id)
2263 return -EINVAL;
2264
2265 dev_addr->bound_dev_if = sin6->sin6_scope_id;
2266#endif
2267 return 0;
2268}
2269
2270int rdma_listen(struct rdma_cm_id *id, int backlog)
2271{
2272 struct rdma_id_private *id_priv;
2273 int ret;
2274
2275 id_priv = container_of(id, struct rdma_id_private, id);
2276 if (id_priv->state == RDMA_CM_IDLE) {
2277 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
2278 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
2279 if (ret)
2280 return ret;
2281 }
2282
2283 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
2284 return -EINVAL;
2285
2286 if (id_priv->reuseaddr) {
2287 ret = cma_bind_listen(id_priv);
2288 if (ret)
2289 goto err;
2290 }
2291
2292 id_priv->backlog = backlog;
2293 if (id->device) {
2294 switch (rdma_node_get_transport(id->device->node_type)) {
2295 case RDMA_TRANSPORT_IB:
2296 ret = cma_ib_listen(id_priv);
2297 if (ret)
2298 goto err;
2299 break;
2300 case RDMA_TRANSPORT_IWARP:
2301 ret = cma_iw_listen(id_priv, backlog);
2302 if (ret)
2303 goto err;
2304 break;
2305 default:
2306 ret = -ENOSYS;
2307 goto err;
2308 }
2309 } else
2310 cma_listen_on_all(id_priv);
2311
2312 return 0;
2313err:
2314 id_priv->backlog = 0;
2315 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
2316 return ret;
2317}
2318EXPORT_SYMBOL(rdma_listen);
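/*
 * Illustrative sketch, not part of this driver: a passive-side consumer
 * binds a source address, listens, and then accepts or rejects each connect
 * request from its event handler.  Note that for
 * RDMA_CM_EVENT_CONNECT_REQUEST the id passed to the handler is the newly
 * created child id, not the listening id.  "listen_id" and "src" are
 * caller-owned placeholders.
 *
 *	ret = rdma_bind_addr(listen_id, (struct sockaddr *) &src);
 *	if (!ret)
 *		ret = rdma_listen(listen_id, 16);
 */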
2319
2320int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2321{
2322 struct rdma_id_private *id_priv;
2323 int ret;
2324
2325 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
2326 return -EAFNOSUPPORT;
2327
2328 id_priv = container_of(id, struct rdma_id_private, id);
2329 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
2330 return -EINVAL;
2331
2332 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
2333 if (ret)
2334 goto err1;
2335
2336 if (!cma_any_addr(addr)) {
2337 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2338 if (ret)
2339 goto err1;
2340
2341 ret = cma_acquire_dev(id_priv);
2342 if (ret)
2343 goto err1;
2344 }
2345
2346 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2347 ret = cma_get_port(id_priv);
2348 if (ret)
2349 goto err2;
2350
2351 return 0;
2352err2:
2353 if (id_priv->cma_dev)
2354 cma_release_dev(id_priv);
2355err1:
2356 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
2357 return ret;
2358}
2359EXPORT_SYMBOL(rdma_bind_addr);
2360
2361static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
2362 struct rdma_route *route)
2363{
2364 struct cma_hdr *cma_hdr;
2365 struct sdp_hh *sdp_hdr;
2366
2367 if (route->addr.src_addr.ss_family == AF_INET) {
2368 struct sockaddr_in *src4, *dst4;
2369
2370 src4 = (struct sockaddr_in *) &route->addr.src_addr;
2371 dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
2372
2373 switch (ps) {
2374 case RDMA_PS_SDP:
2375 sdp_hdr = hdr;
2376 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2377 return -EINVAL;
2378 sdp_set_ip_ver(sdp_hdr, 4);
2379 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2380 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2381 sdp_hdr->port = src4->sin_port;
2382 break;
2383 default:
2384 cma_hdr = hdr;
2385 cma_hdr->cma_version = CMA_VERSION;
2386 cma_set_ip_ver(cma_hdr, 4);
2387 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2388 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2389 cma_hdr->port = src4->sin_port;
2390 break;
2391 }
2392 } else {
2393 struct sockaddr_in6 *src6, *dst6;
2394
2395 src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
2396 dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;
2397
2398 switch (ps) {
2399 case RDMA_PS_SDP:
2400 sdp_hdr = hdr;
2401 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2402 return -EINVAL;
2403 sdp_set_ip_ver(sdp_hdr, 6);
2404 sdp_hdr->src_addr.ip6 = src6->sin6_addr;
2405 sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
2406 sdp_hdr->port = src6->sin6_port;
2407 break;
2408 default:
2409 cma_hdr = hdr;
2410 cma_hdr->cma_version = CMA_VERSION;
2411 cma_set_ip_ver(cma_hdr, 6);
2412 cma_hdr->src_addr.ip6 = src6->sin6_addr;
2413 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
2414 cma_hdr->port = src6->sin6_port;
2415 break;
2416 }
2417 }
2418 return 0;
2419}
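/*
 * Note (descriptive only): for port spaces other than SDP, cma_format_hdr()
 * above prepends a struct cma_hdr - CMA version, IP version nibble, source
 * port and source/destination addresses - in front of the consumer's private
 * data, so that the passive side can recover the addressing information from
 * the REQ/SIDR_REQ private data.
 */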
2420
2421static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2422 struct ib_cm_event *ib_event)
2423{
2424 struct rdma_id_private *id_priv = cm_id->context;
2425 struct rdma_cm_event event;
2426 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2427 int ret = 0;
2428
2429 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
2430 return 0;
2431
2432 memset(&event, 0, sizeof event);
2433 switch (ib_event->event) {
2434 case IB_CM_SIDR_REQ_ERROR:
2435 event.event = RDMA_CM_EVENT_UNREACHABLE;
2436 event.status = -ETIMEDOUT;
2437 break;
2438 case IB_CM_SIDR_REP_RECEIVED:
2439 event.param.ud.private_data = ib_event->private_data;
2440 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2441 if (rep->status != IB_SIDR_SUCCESS) {
2442 event.event = RDMA_CM_EVENT_UNREACHABLE;
2443 event.status = ib_event->param.sidr_rep_rcvd.status;
2444 break;
2445 }
2446 ret = cma_set_qkey(id_priv);
2447 if (ret) {
2448 event.event = RDMA_CM_EVENT_ADDR_ERROR;
2449 event.status = -EINVAL;
2450 break;
2451 }
2452 if (id_priv->qkey != rep->qkey) {
2453 event.event = RDMA_CM_EVENT_UNREACHABLE;
2454 event.status = -EINVAL;
2455 break;
2456 }
2457 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2458 id_priv->id.route.path_rec,
2459 &event.param.ud.ah_attr);
2460 event.param.ud.qp_num = rep->qpn;
2461 event.param.ud.qkey = rep->qkey;
2462 event.event = RDMA_CM_EVENT_ESTABLISHED;
2463 event.status = 0;
2464 break;
2465 default:
2466 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
2467 ib_event->event);
2468 goto out;
2469 }
2470
2471 ret = id_priv->id.event_handler(&id_priv->id, &event);
2472 if (ret) {
2473 /* Destroy the CM ID by returning a non-zero value. */
2474 id_priv->cm_id.ib = NULL;
2475 cma_exch(id_priv, RDMA_CM_DESTROYING);
2476 mutex_unlock(&id_priv->handler_mutex);
2477 rdma_destroy_id(&id_priv->id);
2478 return ret;
2479 }
2480out:
2481 mutex_unlock(&id_priv->handler_mutex);
2482 return ret;
2483}
2484
2485static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2486 struct rdma_conn_param *conn_param)
2487{
2488 struct ib_cm_sidr_req_param req;
2489 struct rdma_route *route;
2490 struct ib_cm_id *id;
2491 int ret;
2492
2493 req.private_data_len = sizeof(struct cma_hdr) +
2494 conn_param->private_data_len;
2495 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2496 if (!req.private_data)
2497 return -ENOMEM;
2498
2499 if (conn_param->private_data && conn_param->private_data_len)
2500 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
2501 conn_param->private_data, conn_param->private_data_len);
2502
2503 route = &id_priv->id.route;
2504 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2505 if (ret)
2506 goto out;
2507
2508 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
2509 id_priv);
2510 if (IS_ERR(id)) {
2511 ret = PTR_ERR(id);
2512 goto out;
2513 }
2514 id_priv->cm_id.ib = id;
2515
2516 req.path = route->path_rec;
2517 req.service_id = cma_get_service_id(id_priv->id.ps,
2518 (struct sockaddr *) &route->addr.dst_addr);
2519 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2520 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2521
2522 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2523 if (ret) {
2524 ib_destroy_cm_id(id_priv->cm_id.ib);
2525 id_priv->cm_id.ib = NULL;
2526 }
2527out:
2528 kfree(req.private_data);
2529 return ret;
2530}
2531
2532static int cma_connect_ib(struct rdma_id_private *id_priv,
2533 struct rdma_conn_param *conn_param)
2534{
2535 struct ib_cm_req_param req;
2536 struct rdma_route *route;
2537 void *private_data;
2538 struct ib_cm_id *id;
2539 int offset, ret;
2540
2541 memset(&req, 0, sizeof req);
2542 offset = cma_user_data_offset(id_priv->id.ps);
2543 req.private_data_len = offset + conn_param->private_data_len;
2544 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2545 if (!private_data)
2546 return -ENOMEM;
2547
2548 if (conn_param->private_data && conn_param->private_data_len)
2549 memcpy(private_data + offset, conn_param->private_data,
2550 conn_param->private_data_len);
2551
2552 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
2553 if (IS_ERR(id)) {
2554 ret = PTR_ERR(id);
2555 goto out;
2556 }
2557 id_priv->cm_id.ib = id;
2558
2559 route = &id_priv->id.route;
2560 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2561 if (ret)
2562 goto out;
2563 req.private_data = private_data;
2564
2565 req.primary_path = &route->path_rec[0];
2566 if (route->num_paths == 2)
2567 req.alternate_path = &route->path_rec[1];
2568
2569 req.service_id = cma_get_service_id(id_priv->id.ps,
2570 (struct sockaddr *) &route->addr.dst_addr);
2571 req.qp_num = id_priv->qp_num;
2572 req.qp_type = IB_QPT_RC;
2573 req.starting_psn = id_priv->seq_num;
2574 req.responder_resources = conn_param->responder_resources;
2575 req.initiator_depth = conn_param->initiator_depth;
2576 req.flow_control = conn_param->flow_control;
2577 req.retry_count = conn_param->retry_count;
2578 req.rnr_retry_count = conn_param->rnr_retry_count;
2579 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2580 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2581 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2582 req.srq = id_priv->srq ? 1 : 0;
2583
2584 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2585out:
2586 if (ret && !IS_ERR(id)) {
2587 ib_destroy_cm_id(id);
2588 id_priv->cm_id.ib = NULL;
2589 }
2590
2591 kfree(private_data);
2592 return ret;
2593}
2594
2595static int cma_connect_iw(struct rdma_id_private *id_priv,
2596 struct rdma_conn_param *conn_param)
2597{
2598 struct iw_cm_id *cm_id;
2599 struct sockaddr_in* sin;
2600 int ret;
2601 struct iw_cm_conn_param iw_param;
2602
2603 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2604 if (IS_ERR(cm_id))
2605 return PTR_ERR(cm_id);
2606
2607 id_priv->cm_id.iw = cm_id;
2608
2609 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
2610 cm_id->local_addr = *sin;
2611
2612 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
2613 cm_id->remote_addr = *sin;
2614
2615 ret = cma_modify_qp_rtr(id_priv, conn_param);
2616 if (ret)
2617 goto out;
2618
2619 iw_param.ord = conn_param->initiator_depth;
2620 iw_param.ird = conn_param->responder_resources;
2621 iw_param.private_data = conn_param->private_data;
2622 iw_param.private_data_len = conn_param->private_data_len;
2623 if (id_priv->id.qp)
2624 iw_param.qpn = id_priv->qp_num;
2625 else
2626 iw_param.qpn = conn_param->qp_num;
2627 ret = iw_cm_connect(cm_id, &iw_param);
2628out:
2629 if (ret) {
2630 iw_destroy_cm_id(cm_id);
2631 id_priv->cm_id.iw = NULL;
2632 }
2633 return ret;
2634}
2635
2636int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2637{
2638 struct rdma_id_private *id_priv;
2639 int ret;
2640
2641 id_priv = container_of(id, struct rdma_id_private, id);
2642 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
2643 return -EINVAL;
2644
2645 if (!id->qp) {
2646 id_priv->qp_num = conn_param->qp_num;
2647 id_priv->srq = conn_param->srq;
2648 }
2649
2650 switch (rdma_node_get_transport(id->device->node_type)) {
2651 case RDMA_TRANSPORT_IB:
2652 if (id->qp_type == IB_QPT_UD)
2653 ret = cma_resolve_ib_udp(id_priv, conn_param);
2654 else
2655 ret = cma_connect_ib(id_priv, conn_param);
2656 break;
2657 case RDMA_TRANSPORT_IWARP:
2658 ret = cma_connect_iw(id_priv, conn_param);
2659 break;
2660 default:
2661 ret = -ENOSYS;
2662 break;
2663 }
2664 if (ret)
2665 goto err;
2666
2667 return 0;
2668err:
2669 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
2670 return ret;
2671}
2672EXPORT_SYMBOL(rdma_connect);
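/*
 * Illustrative sketch, not part of this driver: a minimal rdma_conn_param
 * for an RC connection.  The values are examples only; real consumers size
 * responder_resources/initiator_depth to their HCA and workload.
 *
 *	struct rdma_conn_param conn_param = {
 *		.responder_resources = 1,
 *		.initiator_depth     = 1,
 *		.retry_count         = 7,
 *		.rnr_retry_count     = 7,
 *	};
 *
 *	ret = rdma_connect(id, &conn_param);
 *
 * Completion is reported as RDMA_CM_EVENT_ESTABLISHED, or as
 * RDMA_CM_EVENT_REJECTED / RDMA_CM_EVENT_CONNECT_ERROR on failure.
 */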
2673
2674static int cma_accept_ib(struct rdma_id_private *id_priv,
2675 struct rdma_conn_param *conn_param)
2676{
2677 struct ib_cm_rep_param rep;
2678 int ret;
2679
2680 ret = cma_modify_qp_rtr(id_priv, conn_param);
2681 if (ret)
2682 goto out;
2683
2684 ret = cma_modify_qp_rts(id_priv, conn_param);
2685 if (ret)
2686 goto out;
2687
2688 memset(&rep, 0, sizeof rep);
2689 rep.qp_num = id_priv->qp_num;
2690 rep.starting_psn = id_priv->seq_num;
2691 rep.private_data = conn_param->private_data;
2692 rep.private_data_len = conn_param->private_data_len;
2693 rep.responder_resources = conn_param->responder_resources;
2694 rep.initiator_depth = conn_param->initiator_depth;
2695 rep.failover_accepted = 0;
2696 rep.flow_control = conn_param->flow_control;
2697 rep.rnr_retry_count = conn_param->rnr_retry_count;
2698 rep.srq = id_priv->srq ? 1 : 0;
2699
2700 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2701out:
2702 return ret;
2703}
2704
2705static int cma_accept_iw(struct rdma_id_private *id_priv,
2706 struct rdma_conn_param *conn_param)
2707{
2708 struct iw_cm_conn_param iw_param;
2709 int ret;
2710
2711 ret = cma_modify_qp_rtr(id_priv, conn_param);
2712 if (ret)
2713 return ret;
2714
2715 iw_param.ord = conn_param->initiator_depth;
2716 iw_param.ird = conn_param->responder_resources;
2717 iw_param.private_data = conn_param->private_data;
2718 iw_param.private_data_len = conn_param->private_data_len;
2719 if (id_priv->id.qp) {
2720 iw_param.qpn = id_priv->qp_num;
2721 } else
2722 iw_param.qpn = conn_param->qp_num;
2723
2724 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2725}
2726
2727static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2728 enum ib_cm_sidr_status status,
2729 const void *private_data, int private_data_len)
2730{
2731 struct ib_cm_sidr_rep_param rep;
2732 int ret;
2733
2734 memset(&rep, 0, sizeof rep);
2735 rep.status = status;
2736 if (status == IB_SIDR_SUCCESS) {
2737 ret = cma_set_qkey(id_priv);
2738 if (ret)
2739 return ret;
2740 rep.qp_num = id_priv->qp_num;
2741 rep.qkey = id_priv->qkey;
2742 }
2743 rep.private_data = private_data;
2744 rep.private_data_len = private_data_len;
2745
2746 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2747}
2748
2749int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2750{
2751 struct rdma_id_private *id_priv;
2752 int ret;
2753
2754 id_priv = container_of(id, struct rdma_id_private, id);
2755
2756 id_priv->owner = task_pid_nr(current);
2757
2758 if (!cma_comp(id_priv, RDMA_CM_CONNECT))
2759 return -EINVAL;
2760
2761 if (!id->qp && conn_param) {
2762 id_priv->qp_num = conn_param->qp_num;
2763 id_priv->srq = conn_param->srq;
2764 }
2765
2766 switch (rdma_node_get_transport(id->device->node_type)) {
2767 case RDMA_TRANSPORT_IB:
 2768 if (id->qp_type == IB_QPT_UD)
 2769 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 2770 conn_param ? conn_param->private_data : NULL,
 2771 conn_param ? conn_param->private_data_len : 0);
2772 else if (conn_param)
2773 ret = cma_accept_ib(id_priv, conn_param);
2774 else
2775 ret = cma_rep_recv(id_priv);
2776 break;
2777 case RDMA_TRANSPORT_IWARP:
2778 ret = cma_accept_iw(id_priv, conn_param);
2779 break;
2780 default:
2781 ret = -ENOSYS;
2782 break;
2783 }
2784
2785 if (ret)
2786 goto reject;
2787
2788 return 0;
2789reject:
2790 cma_modify_qp_err(id_priv);
2791 rdma_reject(id, NULL, 0);
2792 return ret;
2793}
2794EXPORT_SYMBOL(rdma_accept);
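/*
 * Illustrative sketch, not part of this driver: rdma_accept() is typically
 * called from within the RDMA_CM_EVENT_CONNECT_REQUEST callback; returning a
 * non-zero value from the callback instead tells the CM to destroy the id.
 * "conn_param" is a caller-filled placeholder.
 *
 *	static int server_handler(struct rdma_cm_id *id,
 *				  struct rdma_cm_event *event)
 *	{
 *		if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return rdma_accept(id, &conn_param);
 *		return 0;
 *	}
 */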
2795
2796int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2797{
2798 struct rdma_id_private *id_priv;
2799 int ret;
2800
2801 id_priv = container_of(id, struct rdma_id_private, id);
2802 if (!id_priv->cm_id.ib)
2803 return -EINVAL;
2804
2805 switch (id->device->node_type) {
2806 case RDMA_NODE_IB_CA:
2807 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2808 break;
2809 default:
2810 ret = 0;
2811 break;
2812 }
2813 return ret;
2814}
2815EXPORT_SYMBOL(rdma_notify);
2816
2817int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2818 u8 private_data_len)
2819{
2820 struct rdma_id_private *id_priv;
2821 int ret;
2822
2823 id_priv = container_of(id, struct rdma_id_private, id);
2824 if (!id_priv->cm_id.ib)
2825 return -EINVAL;
2826
2827 switch (rdma_node_get_transport(id->device->node_type)) {
2828 case RDMA_TRANSPORT_IB:
2829 if (id->qp_type == IB_QPT_UD)
2830 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2831 private_data, private_data_len);
2832 else
2833 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2834 IB_CM_REJ_CONSUMER_DEFINED, NULL,
2835 0, private_data, private_data_len);
2836 break;
2837 case RDMA_TRANSPORT_IWARP:
2838 ret = iw_cm_reject(id_priv->cm_id.iw,
2839 private_data, private_data_len);
2840 break;
2841 default:
2842 ret = -ENOSYS;
2843 break;
2844 }
2845 return ret;
2846}
2847EXPORT_SYMBOL(rdma_reject);
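/*
 * Illustrative sketch, not part of this driver: a listener that cannot
 * service a connect request can reject it from the same callback, optionally
 * returning consumer-defined private data to the initiator.  MY_REJ_BUSY is
 * a placeholder value.
 *
 *	u8 reason = MY_REJ_BUSY;
 *
 *	ret = rdma_reject(id, &reason, sizeof(reason));
 */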
2848
2849int rdma_disconnect(struct rdma_cm_id *id)
2850{
2851 struct rdma_id_private *id_priv;
2852 int ret;
2853
2854 id_priv = container_of(id, struct rdma_id_private, id);
2855 if (!id_priv->cm_id.ib)
2856 return -EINVAL;
2857
2858 switch (rdma_node_get_transport(id->device->node_type)) {
2859 case RDMA_TRANSPORT_IB:
2860 ret = cma_modify_qp_err(id_priv);
2861 if (ret)
2862 goto out;
2863 /* Initiate or respond to a disconnect. */
2864 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
2865 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
2866 break;
2867 case RDMA_TRANSPORT_IWARP:
2868 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
2869 break;
2870 default:
2871 ret = -EINVAL;
2872 break;
2873 }
2874out:
2875 return ret;
2876}
2877EXPORT_SYMBOL(rdma_disconnect);
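/*
 * Illustrative sketch, not part of this driver: orderly teardown from the
 * consumer's side.  rdma_disconnect() makes the remote end see
 * RDMA_CM_EVENT_DISCONNECTED; the local id itself is released separately
 * with rdma_destroy_id() once the consumer is done with it.
 *
 *	rdma_disconnect(id);
 *	rdma_destroy_id(id);
 */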
2878
2879static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2880{
2881 struct rdma_id_private *id_priv;
2882 struct cma_multicast *mc = multicast->context;
2883 struct rdma_cm_event event;
2884 int ret;
2885
2886 id_priv = mc->id_priv;
2887 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
2888 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
2889 return 0;
2890
2891 mutex_lock(&id_priv->qp_mutex);
2892 if (!status && id_priv->id.qp)
2893 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
2894 multicast->rec.mlid);
2895 mutex_unlock(&id_priv->qp_mutex);
2896
2897 memset(&event, 0, sizeof event);
2898 event.status = status;
2899 event.param.ud.private_data = mc->context;
2900 if (!status) {
2901 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
2902 ib_init_ah_from_mcmember(id_priv->id.device,
2903 id_priv->id.port_num, &multicast->rec,
2904 &event.param.ud.ah_attr);
2905 event.param.ud.qp_num = 0xFFFFFF;
2906 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
2907 } else
2908 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
2909
2910 ret = id_priv->id.event_handler(&id_priv->id, &event);
2911 if (ret) {
2912 cma_exch(id_priv, RDMA_CM_DESTROYING);
2913 mutex_unlock(&id_priv->handler_mutex);
2914 rdma_destroy_id(&id_priv->id);
2915 return 0;
2916 }
2917
2918 mutex_unlock(&id_priv->handler_mutex);
2919 return 0;
2920}
2921
2922static void cma_set_mgid(struct rdma_id_private *id_priv,
2923 struct sockaddr *addr, union ib_gid *mgid)
2924{
2925 unsigned char mc_map[MAX_ADDR_LEN];
2926 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2927 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
2928 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
2929
2930 if (cma_any_addr(addr)) {
2931 memset(mgid, 0, sizeof *mgid);
2932 } else if ((addr->sa_family == AF_INET6) &&
2933 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
2934 0xFF10A01B)) {
2935 /* IPv6 address is an SA assigned MGID. */
2936 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
2937 } else if ((addr->sa_family == AF_INET6)) {
2938 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
2939 if (id_priv->id.ps == RDMA_PS_UDP)
2940 mc_map[7] = 0x01; /* Use RDMA CM signature */
2941 *mgid = *(union ib_gid *) (mc_map + 4);
2942 } else {
2943 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
2944 if (id_priv->id.ps == RDMA_PS_UDP)
2945 mc_map[7] = 0x01; /* Use RDMA CM signature */
2946 *mgid = *(union ib_gid *) (mc_map + 4);
2947 }
2948}
2949
2950static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2951 struct cma_multicast *mc)
2952{
2953 struct ib_sa_mcmember_rec rec;
2954 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2955 ib_sa_comp_mask comp_mask;
2956 int ret;
2957
2958 ib_addr_get_mgid(dev_addr, &rec.mgid);
2959 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
2960 &rec.mgid, &rec);
2961 if (ret)
2962 return ret;
2963
2964 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
2965 if (id_priv->id.ps == RDMA_PS_UDP)
2966 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2967 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
2968 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2969 rec.join_state = 1;
2970
2971 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
2972 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
2973 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
2974 IB_SA_MCMEMBER_REC_FLOW_LABEL |
2975 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
2976
2977 if (id_priv->id.ps == RDMA_PS_IPOIB)
2978 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
2979 IB_SA_MCMEMBER_REC_RATE_SELECTOR;
2980
2981 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
2982 id_priv->id.port_num, &rec,
2983 comp_mask, GFP_KERNEL,
2984 cma_ib_mc_handler, mc);
2985 if (IS_ERR(mc->multicast.ib))
2986 return PTR_ERR(mc->multicast.ib);
2987
2988 return 0;
2989}
2990
2991static void iboe_mcast_work_handler(struct work_struct *work)
2992{
2993 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
2994 struct cma_multicast *mc = mw->mc;
2995 struct ib_sa_multicast *m = mc->multicast.ib;
2996
2997 mc->multicast.ib->context = mc;
2998 cma_ib_mc_handler(0, m);
2999 kref_put(&mc->mcref, release_mc);
3000 kfree(mw);
3001}
3002
3003static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
3004{
3005 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
3006 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
3007
3008 if (cma_any_addr(addr)) {
3009 memset(mgid, 0, sizeof *mgid);
3010 } else if (addr->sa_family == AF_INET6) {
3011 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
3012 } else {
3013 mgid->raw[0] = 0xff;
3014 mgid->raw[1] = 0x0e;
3015 mgid->raw[2] = 0;
3016 mgid->raw[3] = 0;
3017 mgid->raw[4] = 0;
3018 mgid->raw[5] = 0;
3019 mgid->raw[6] = 0;
3020 mgid->raw[7] = 0;
3021 mgid->raw[8] = 0;
3022 mgid->raw[9] = 0;
3023 mgid->raw[10] = 0xff;
3024 mgid->raw[11] = 0xff;
3025 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
3026 }
3027}
3028
3029static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3030 struct cma_multicast *mc)
3031{
3032 struct iboe_mcast_work *work;
3033 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3034 int err;
3035 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
3036 struct net_device *ndev = NULL;
3037
3038 if (cma_zero_addr((struct sockaddr *)&mc->addr))
3039 return -EINVAL;
3040
3041 work = kzalloc(sizeof *work, GFP_KERNEL);
3042 if (!work)
3043 return -ENOMEM;
3044
3045 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
3046 if (!mc->multicast.ib) {
3047 err = -ENOMEM;
3048 goto out1;
3049 }
3050
3051 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
3052
3053 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
3054 if (id_priv->id.ps == RDMA_PS_UDP)
3055 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
3056
3057 if (dev_addr->bound_dev_if)
3058 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
3059 if (!ndev) {
3060 err = -ENODEV;
3061 goto out2;
3062 }
3063 mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
3064 mc->multicast.ib->rec.hop_limit = 1;
3065 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
3066 dev_put(ndev);
3067 if (!mc->multicast.ib->rec.mtu) {
3068 err = -EINVAL;
3069 goto out2;
3070 }
3071 iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
3072 work->id = id_priv;
3073 work->mc = mc;
3074 INIT_WORK(&work->work, iboe_mcast_work_handler);
3075 kref_get(&mc->mcref);
3076 queue_work(cma_wq, &work->work);
3077
3078 return 0;
3079
3080out2:
3081 kfree(mc->multicast.ib);
3082out1:
3083 kfree(work);
3084 return err;
3085}
3086
3087int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3088 void *context)
3089{
3090 struct rdma_id_private *id_priv;
3091 struct cma_multicast *mc;
3092 int ret;
3093
3094 id_priv = container_of(id, struct rdma_id_private, id);
3095 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
3096 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
3097 return -EINVAL;
3098
3099 mc = kmalloc(sizeof *mc, GFP_KERNEL);
3100 if (!mc)
3101 return -ENOMEM;
3102
3103 memcpy(&mc->addr, addr, ip_addr_size(addr));
3104 mc->context = context;
3105 mc->id_priv = id_priv;
3106
3107 spin_lock(&id_priv->lock);
3108 list_add(&mc->list, &id_priv->mc_list);
3109 spin_unlock(&id_priv->lock);
3110
3111 switch (rdma_node_get_transport(id->device->node_type)) {
3112 case RDMA_TRANSPORT_IB:
3113 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
3114 case IB_LINK_LAYER_INFINIBAND:
3115 ret = cma_join_ib_multicast(id_priv, mc);
3116 break;
3117 case IB_LINK_LAYER_ETHERNET:
3118 kref_init(&mc->mcref);
3119 ret = cma_iboe_join_multicast(id_priv, mc);
3120 break;
3121 default:
3122 ret = -EINVAL;
3123 }
3124 break;
3125 default:
3126 ret = -ENOSYS;
3127 break;
3128 }
3129
3130 if (ret) {
3131 spin_lock_irq(&id_priv->lock);
3132 list_del(&mc->list);
3133 spin_unlock_irq(&id_priv->lock);
3134 kfree(mc);
3135 }
3136 return ret;
3137}
3138EXPORT_SYMBOL(rdma_join_multicast);
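/*
 * Illustrative sketch, not part of this driver: a UD consumer joins a group
 * once its address is bound or resolved, and learns the AH attributes, QPN
 * and QKEY for the group from the RDMA_CM_EVENT_MULTICAST_JOIN event;
 * rdma_leave_multicast() undoes the join.  "mcast_addr" and "ctx" are
 * caller-owned placeholders.
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &mcast_addr, ctx);
 *
 *	case RDMA_CM_EVENT_MULTICAST_JOIN:
 *		// event->param.ud.ah_attr / qp_num / qkey address the group
 *		break;
 */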
3139
3140void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
3141{
3142 struct rdma_id_private *id_priv;
3143 struct cma_multicast *mc;
3144
3145 id_priv = container_of(id, struct rdma_id_private, id);
3146 spin_lock_irq(&id_priv->lock);
3147 list_for_each_entry(mc, &id_priv->mc_list, list) {
3148 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
3149 list_del(&mc->list);
3150 spin_unlock_irq(&id_priv->lock);
3151
3152 if (id->qp)
3153 ib_detach_mcast(id->qp,
3154 &mc->multicast.ib->rec.mgid,
3155 mc->multicast.ib->rec.mlid);
3156 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
3157 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
3158 case IB_LINK_LAYER_INFINIBAND:
3159 ib_sa_free_multicast(mc->multicast.ib);
3160 kfree(mc);
3161 break;
3162 case IB_LINK_LAYER_ETHERNET:
3163 kref_put(&mc->mcref, release_mc);
3164 break;
3165 default:
3166 break;
3167 }
3168 }
3169 return;
3170 }
3171 }
3172 spin_unlock_irq(&id_priv->lock);
3173}
3174EXPORT_SYMBOL(rdma_leave_multicast);
3175
3176static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
3177{
3178 struct rdma_dev_addr *dev_addr;
3179 struct cma_ndev_work *work;
3180
3181 dev_addr = &id_priv->id.route.addr.dev_addr;
3182
3183 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
3184 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
3185 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3186 ndev->name, &id_priv->id);
3187 work = kzalloc(sizeof *work, GFP_KERNEL);
3188 if (!work)
3189 return -ENOMEM;
3190
3191 INIT_WORK(&work->work, cma_ndev_work_handler);
3192 work->id = id_priv;
3193 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
3194 atomic_inc(&id_priv->refcount);
3195 queue_work(cma_wq, &work->work);
3196 }
3197
3198 return 0;
3199}
3200
3201static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
3202 void *ctx)
3203{
3204 struct net_device *ndev = (struct net_device *)ctx;
3205 struct cma_device *cma_dev;
3206 struct rdma_id_private *id_priv;
3207 int ret = NOTIFY_DONE;
3208
3209 if (dev_net(ndev) != &init_net)
3210 return NOTIFY_DONE;
3211
3212 if (event != NETDEV_BONDING_FAILOVER)
3213 return NOTIFY_DONE;
3214
3215 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
3216 return NOTIFY_DONE;
3217
3218 mutex_lock(&lock);
3219 list_for_each_entry(cma_dev, &dev_list, list)
3220 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3221 ret = cma_netdev_change(ndev, id_priv);
3222 if (ret)
3223 goto out;
3224 }
3225
3226out:
3227 mutex_unlock(&lock);
3228 return ret;
3229}
3230
3231static struct notifier_block cma_nb = {
3232 .notifier_call = cma_netdev_callback
3233};
3234
3235static void cma_add_one(struct ib_device *device)
3236{
3237 struct cma_device *cma_dev;
3238 struct rdma_id_private *id_priv;
3239
3240 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
3241 if (!cma_dev)
3242 return;
3243
3244 cma_dev->device = device;
3245
3246 init_completion(&cma_dev->comp);
3247 atomic_set(&cma_dev->refcount, 1);
3248 INIT_LIST_HEAD(&cma_dev->id_list);
3249 ib_set_client_data(device, &cma_client, cma_dev);
3250
3251 mutex_lock(&lock);
3252 list_add_tail(&cma_dev->list, &dev_list);
3253 list_for_each_entry(id_priv, &listen_any_list, list)
3254 cma_listen_on_dev(id_priv, cma_dev);
3255 mutex_unlock(&lock);
3256}
3257
3258static int cma_remove_id_dev(struct rdma_id_private *id_priv)
3259{
3260 struct rdma_cm_event event;
3261 enum rdma_cm_state state;
3262 int ret = 0;
3263
3264 /* Record that we want to remove the device */
3265 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
3266 if (state == RDMA_CM_DESTROYING)
3267 return 0;
3268
3269 cma_cancel_operation(id_priv, state);
3270 mutex_lock(&id_priv->handler_mutex);
3271
3272 /* Check for destruction from another callback. */
3273 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
3274 goto out;
3275
3276 memset(&event, 0, sizeof event);
3277 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
3278 ret = id_priv->id.event_handler(&id_priv->id, &event);
3279out:
3280 mutex_unlock(&id_priv->handler_mutex);
3281 return ret;
3282}
3283
3284static void cma_process_remove(struct cma_device *cma_dev)
3285{
3286 struct rdma_id_private *id_priv;
3287 int ret;
3288
3289 mutex_lock(&lock);
3290 while (!list_empty(&cma_dev->id_list)) {
3291 id_priv = list_entry(cma_dev->id_list.next,
3292 struct rdma_id_private, list);
3293
3294 list_del(&id_priv->listen_list);
3295 list_del_init(&id_priv->list);
3296 atomic_inc(&id_priv->refcount);
3297 mutex_unlock(&lock);
3298
3299 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
3300 cma_deref_id(id_priv);
3301 if (ret)
3302 rdma_destroy_id(&id_priv->id);
3303
3304 mutex_lock(&lock);
3305 }
3306 mutex_unlock(&lock);
3307
3308 cma_deref_dev(cma_dev);
3309 wait_for_completion(&cma_dev->comp);
3310}
3311
3312static void cma_remove_one(struct ib_device *device)
3313{
3314 struct cma_device *cma_dev;
3315
3316 cma_dev = ib_get_client_data(device, &cma_client);
3317 if (!cma_dev)
3318 return;
3319
3320 mutex_lock(&lock);
3321 list_del(&cma_dev->list);
3322 mutex_unlock(&lock);
3323
3324 cma_process_remove(cma_dev);
3325 kfree(cma_dev);
3326}
3327
3328static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
3329{
3330 struct nlmsghdr *nlh;
3331 struct rdma_cm_id_stats *id_stats;
3332 struct rdma_id_private *id_priv;
3333 struct rdma_cm_id *id = NULL;
3334 struct cma_device *cma_dev;
3335 int i_dev = 0, i_id = 0;
3336
3337 /*
3338 * We export all of the IDs as a sequence of messages. Each
3339 * ID gets its own netlink message.
3340 */
3341 mutex_lock(&lock);
3342
3343 list_for_each_entry(cma_dev, &dev_list, list) {
3344 if (i_dev < cb->args[0]) {
3345 i_dev++;
3346 continue;
3347 }
3348
3349 i_id = 0;
3350 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3351 if (i_id < cb->args[1]) {
3352 i_id++;
3353 continue;
3354 }
3355
3356 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
3357 sizeof *id_stats, RDMA_NL_RDMA_CM,
3358 RDMA_NL_RDMA_CM_ID_STATS);
3359 if (!id_stats)
3360 goto out;
3361
3362 memset(id_stats, 0, sizeof *id_stats);
3363 id = &id_priv->id;
3364 id_stats->node_type = id->route.addr.dev_addr.dev_type;
3365 id_stats->port_num = id->port_num;
3366 id_stats->bound_dev_if =
3367 id->route.addr.dev_addr.bound_dev_if;
3368
3369 if (id->route.addr.src_addr.ss_family == AF_INET) {
3370 if (ibnl_put_attr(skb, nlh,
3371 sizeof(struct sockaddr_in),
3372 &id->route.addr.src_addr,
3373 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
3374 goto out;
3375 }
3376 if (ibnl_put_attr(skb, nlh,
3377 sizeof(struct sockaddr_in),
3378 &id->route.addr.dst_addr,
3379 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
3380 goto out;
3381 }
3382 } else if (id->route.addr.src_addr.ss_family == AF_INET6) {
3383 if (ibnl_put_attr(skb, nlh,
3384 sizeof(struct sockaddr_in6),
3385 &id->route.addr.src_addr,
3386 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
3387 goto out;
3388 }
3389 if (ibnl_put_attr(skb, nlh,
3390 sizeof(struct sockaddr_in6),
3391 &id->route.addr.dst_addr,
3392 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
3393 goto out;
3394 }
3395 }
3396
3397 id_stats->pid = id_priv->owner;
3398 id_stats->port_space = id->ps;
3399 id_stats->cm_state = id_priv->state;
3400 id_stats->qp_num = id_priv->qp_num;
3401 id_stats->qp_type = id->qp_type;
3402
3403 i_id++;
3404 }
3405
3406 cb->args[1] = 0;
3407 i_dev++;
3408 }
3409
3410out:
3411 mutex_unlock(&lock);
3412 cb->args[0] = i_dev;
3413 cb->args[1] = i_id;
3414
3415 return skb->len;
3416}
3417
3418static const struct ibnl_client_cbs cma_cb_table[] = {
3419 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
3420};
3421
3422static int __init cma_init(void)
3423{
3424 int ret;
3425
3426 cma_wq = create_singlethread_workqueue("rdma_cm");
3427 if (!cma_wq)
3428 return -ENOMEM;
3429
3430 ib_sa_register_client(&sa_client);
3431 rdma_addr_register_client(&addr_client);
3432 register_netdevice_notifier(&cma_nb);
3433
3434 ret = ib_register_client(&cma_client);
3435 if (ret)
3436 goto err;
3437
3438 if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
3439 printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
3440
3441 return 0;
3442
3443err:
3444 unregister_netdevice_notifier(&cma_nb);
3445 rdma_addr_unregister_client(&addr_client);
3446 ib_sa_unregister_client(&sa_client);
3447 destroy_workqueue(cma_wq);
3448 return ret;
3449}
3450
3451static void __exit cma_cleanup(void)
3452{
3453 ibnl_remove_client(RDMA_NL_RDMA_CM);
3454 ib_unregister_client(&cma_client);
3455 unregister_netdevice_notifier(&cma_nb);
3456 rdma_addr_unregister_client(&addr_client);
3457 ib_sa_unregister_client(&sa_client);
3458 destroy_workqueue(cma_wq);
3459 idr_destroy(&sdp_ps);
3460 idr_destroy(&tcp_ps);
3461 idr_destroy(&udp_ps);
3462 idr_destroy(&ipoib_ps);
3463}
3464
3465module_init(cma_init);
3466module_exit(cma_cleanup);
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
4 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
5 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
6 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
7 */
8
9#include <linux/completion.h>
10#include <linux/in.h>
11#include <linux/in6.h>
12#include <linux/mutex.h>
13#include <linux/random.h>
14#include <linux/rbtree.h>
15#include <linux/igmp.h>
16#include <linux/xarray.h>
17#include <linux/inetdevice.h>
18#include <linux/slab.h>
19#include <linux/module.h>
20#include <net/route.h>
21
22#include <net/net_namespace.h>
23#include <net/netns/generic.h>
24#include <net/netevent.h>
25#include <net/tcp.h>
26#include <net/ipv6.h>
27#include <net/ip_fib.h>
28#include <net/ip6_route.h>
29
30#include <rdma/rdma_cm.h>
31#include <rdma/rdma_cm_ib.h>
32#include <rdma/rdma_netlink.h>
33#include <rdma/ib.h>
34#include <rdma/ib_cache.h>
35#include <rdma/ib_cm.h>
36#include <rdma/ib_sa.h>
37#include <rdma/iw_cm.h>
38
39#include "core_priv.h"
40#include "cma_priv.h"
41#include "cma_trace.h"
42
43MODULE_AUTHOR("Sean Hefty");
44MODULE_DESCRIPTION("Generic RDMA CM Agent");
45MODULE_LICENSE("Dual BSD/GPL");
46
47#define CMA_CM_RESPONSE_TIMEOUT 20
48#define CMA_MAX_CM_RETRIES 15
49#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
50#define CMA_IBOE_PACKET_LIFETIME 16
51#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
52
53static const char * const cma_events[] = {
54 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
55 [RDMA_CM_EVENT_ADDR_ERROR] = "address error",
 56 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved",
57 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
58 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
59 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
60 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
61 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
62 [RDMA_CM_EVENT_REJECTED] = "rejected",
63 [RDMA_CM_EVENT_ESTABLISHED] = "established",
64 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
65 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
66 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
67 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
68 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
69 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
70};
71
72static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
73 enum ib_gid_type gid_type);
74
75const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
76{
77 size_t index = event;
78
79 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
80 cma_events[index] : "unrecognized event";
81}
82EXPORT_SYMBOL(rdma_event_msg);
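/*
 * Illustrative sketch, not part of this driver: rdma_event_msg() is mainly
 * useful for logging inside a consumer's event handler.
 *
 *	pr_debug("cm event %s (%d), status %d\n",
 *		 rdma_event_msg(event->event), event->event, event->status);
 */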
83
84const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
85 int reason)
86{
87 if (rdma_ib_or_roce(id->device, id->port_num))
88 return ibcm_reject_msg(reason);
89
90 if (rdma_protocol_iwarp(id->device, id->port_num))
91 return iwcm_reject_msg(reason);
92
93 WARN_ON_ONCE(1);
94 return "unrecognized transport";
95}
96EXPORT_SYMBOL(rdma_reject_msg);
97
98/**
99 * rdma_is_consumer_reject - return true if the consumer rejected the connect
100 * request.
101 * @id: Communication identifier that received the REJECT event.
102 * @reason: Value returned in the REJECT event status field.
103 */
104static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
105{
106 if (rdma_ib_or_roce(id->device, id->port_num))
107 return reason == IB_CM_REJ_CONSUMER_DEFINED;
108
109 if (rdma_protocol_iwarp(id->device, id->port_num))
110 return reason == -ECONNREFUSED;
111
112 WARN_ON_ONCE(1);
113 return false;
114}
115
116const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
117 struct rdma_cm_event *ev, u8 *data_len)
118{
119 const void *p;
120
121 if (rdma_is_consumer_reject(id, ev->status)) {
122 *data_len = ev->param.conn.private_data_len;
123 p = ev->param.conn.private_data;
124 } else {
125 *data_len = 0;
126 p = NULL;
127 }
128 return p;
129}
130EXPORT_SYMBOL(rdma_consumer_reject_data);
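/*
 * Illustrative sketch, not part of this driver: on RDMA_CM_EVENT_REJECTED a
 * consumer can distinguish a peer-application reject (and read its private
 * data) from a transport-level reject.  handle_peer_reject() is a
 * placeholder helper.
 *
 *	case RDMA_CM_EVENT_REJECTED: {
 *		u8 len;
 *		const void *data = rdma_consumer_reject_data(id, event, &len);
 *
 *		if (data)
 *			handle_peer_reject(data, len);
 *		break;
 *	}
 */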
131
132/**
133 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
134 * @id: Communication Identifier
135 */
136struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
137{
138 struct rdma_id_private *id_priv;
139
140 id_priv = container_of(id, struct rdma_id_private, id);
141 if (id->device->node_type == RDMA_NODE_RNIC)
142 return id_priv->cm_id.iw;
143 return NULL;
144}
145EXPORT_SYMBOL(rdma_iw_cm_id);
146
147/**
148 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
149 * @res: rdma resource tracking entry pointer
150 */
151struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
152{
153 struct rdma_id_private *id_priv =
154 container_of(res, struct rdma_id_private, res);
155
156 return &id_priv->id;
157}
158EXPORT_SYMBOL(rdma_res_to_id);
159
160static int cma_add_one(struct ib_device *device);
161static void cma_remove_one(struct ib_device *device, void *client_data);
162
163static struct ib_client cma_client = {
164 .name = "cma",
165 .add = cma_add_one,
166 .remove = cma_remove_one
167};
168
169static struct ib_sa_client sa_client;
170static LIST_HEAD(dev_list);
171static LIST_HEAD(listen_any_list);
172static DEFINE_MUTEX(lock);
173static struct rb_root id_table = RB_ROOT;
174/* Serialize operations on the id_table tree */
175static DEFINE_SPINLOCK(id_table_lock);
176static struct workqueue_struct *cma_wq;
177static unsigned int cma_pernet_id;
178
179struct cma_pernet {
180 struct xarray tcp_ps;
181 struct xarray udp_ps;
182 struct xarray ipoib_ps;
183 struct xarray ib_ps;
184};
185
186static struct cma_pernet *cma_pernet(struct net *net)
187{
188 return net_generic(net, cma_pernet_id);
189}
190
191static
192struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
193{
194 struct cma_pernet *pernet = cma_pernet(net);
195
196 switch (ps) {
197 case RDMA_PS_TCP:
198 return &pernet->tcp_ps;
199 case RDMA_PS_UDP:
200 return &pernet->udp_ps;
201 case RDMA_PS_IPOIB:
202 return &pernet->ipoib_ps;
203 case RDMA_PS_IB:
204 return &pernet->ib_ps;
205 default:
206 return NULL;
207 }
208}
209
210struct id_table_entry {
211 struct list_head id_list;
212 struct rb_node rb_node;
213};
214
215struct cma_device {
216 struct list_head list;
217 struct ib_device *device;
218 struct completion comp;
219 refcount_t refcount;
220 struct list_head id_list;
221 enum ib_gid_type *default_gid_type;
222 u8 *default_roce_tos;
223};
224
225struct rdma_bind_list {
226 enum rdma_ucm_port_space ps;
227 struct hlist_head owners;
228 unsigned short port;
229};
230
231static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
232 struct rdma_bind_list *bind_list, int snum)
233{
234 struct xarray *xa = cma_pernet_xa(net, ps);
235
236 return xa_insert(xa, snum, bind_list, GFP_KERNEL);
237}
238
239static struct rdma_bind_list *cma_ps_find(struct net *net,
240 enum rdma_ucm_port_space ps, int snum)
241{
242 struct xarray *xa = cma_pernet_xa(net, ps);
243
244 return xa_load(xa, snum);
245}
246
247static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
248 int snum)
249{
250 struct xarray *xa = cma_pernet_xa(net, ps);
251
252 xa_erase(xa, snum);
253}
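/*
 * Note (descriptive only): the three helpers above give each port space its
 * own per-namespace table of bound ports.  A bind_list is inserted with
 * cma_ps_alloc() when the first id claims a port, looked up with
 * cma_ps_find() by later binds and by incoming requests, and erased with
 * cma_ps_remove() once the last owner releases the port.
 */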
254
255enum {
256 CMA_OPTION_AFONLY,
257};
258
259void cma_dev_get(struct cma_device *cma_dev)
260{
261 refcount_inc(&cma_dev->refcount);
262}
263
264void cma_dev_put(struct cma_device *cma_dev)
265{
266 if (refcount_dec_and_test(&cma_dev->refcount))
267 complete(&cma_dev->comp);
268}
269
270struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
271 void *cookie)
272{
273 struct cma_device *cma_dev;
274 struct cma_device *found_cma_dev = NULL;
275
276 mutex_lock(&lock);
277
278 list_for_each_entry(cma_dev, &dev_list, list)
279 if (filter(cma_dev->device, cookie)) {
280 found_cma_dev = cma_dev;
281 break;
282 }
283
284 if (found_cma_dev)
285 cma_dev_get(found_cma_dev);
286 mutex_unlock(&lock);
287 return found_cma_dev;
288}
289
290int cma_get_default_gid_type(struct cma_device *cma_dev,
291 u32 port)
292{
293 if (!rdma_is_port_valid(cma_dev->device, port))
294 return -EINVAL;
295
296 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
297}
298
299int cma_set_default_gid_type(struct cma_device *cma_dev,
300 u32 port,
301 enum ib_gid_type default_gid_type)
302{
303 unsigned long supported_gids;
304
305 if (!rdma_is_port_valid(cma_dev->device, port))
306 return -EINVAL;
307
308 if (default_gid_type == IB_GID_TYPE_IB &&
309 rdma_protocol_roce_eth_encap(cma_dev->device, port))
310 default_gid_type = IB_GID_TYPE_ROCE;
311
312 supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
313
314 if (!(supported_gids & 1 << default_gid_type))
315 return -EINVAL;
316
317 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
318 default_gid_type;
319
320 return 0;
321}
322
323int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
324{
325 if (!rdma_is_port_valid(cma_dev->device, port))
326 return -EINVAL;
327
328 return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
329}
330
331int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
332 u8 default_roce_tos)
333{
334 if (!rdma_is_port_valid(cma_dev->device, port))
335 return -EINVAL;
336
337 cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
338 default_roce_tos;
339
340 return 0;
341}
342struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
343{
344 return cma_dev->device;
345}
346
347/*
 348 * Device removal can occur at any time, so we need extra handling to
349 * serialize notifying the user of device removal with other callbacks.
350 * We do this by disabling removal notification while a callback is in process,
351 * and reporting it after the callback completes.
352 */
353
354struct cma_multicast {
355 struct rdma_id_private *id_priv;
356 union {
357 struct ib_sa_multicast *sa_mc;
358 struct {
359 struct work_struct work;
360 struct rdma_cm_event event;
361 } iboe_join;
362 };
363 struct list_head list;
364 void *context;
365 struct sockaddr_storage addr;
366 u8 join_state;
367};
368
369struct cma_work {
370 struct work_struct work;
371 struct rdma_id_private *id;
372 enum rdma_cm_state old_state;
373 enum rdma_cm_state new_state;
374 struct rdma_cm_event event;
375};
376
377union cma_ip_addr {
378 struct in6_addr ip6;
379 struct {
380 __be32 pad[3];
381 __be32 addr;
382 } ip4;
383};
384
385struct cma_hdr {
386 u8 cma_version;
387 u8 ip_version; /* IP version: 7:4 */
388 __be16 port;
389 union cma_ip_addr src_addr;
390 union cma_ip_addr dst_addr;
391};
392
393#define CMA_VERSION 0x00
394
395struct cma_req_info {
396 struct sockaddr_storage listen_addr_storage;
397 struct sockaddr_storage src_addr_storage;
398 struct ib_device *device;
399 union ib_gid local_gid;
400 __be64 service_id;
401 int port;
402 bool has_gid;
403 u16 pkey;
404};
405
406static int cma_comp_exch(struct rdma_id_private *id_priv,
407 enum rdma_cm_state comp, enum rdma_cm_state exch)
408{
409 unsigned long flags;
410 int ret;
411
412 /*
413 * The FSM uses a funny double locking where state is protected by both
414 * the handler_mutex and the spinlock. State is not allowed to change
415 * to/from a handler_mutex protected value without also holding
416 * handler_mutex.
417 */
418 if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
419 lockdep_assert_held(&id_priv->handler_mutex);
420
421 spin_lock_irqsave(&id_priv->lock, flags);
422 if ((ret = (id_priv->state == comp)))
423 id_priv->state = exch;
424 spin_unlock_irqrestore(&id_priv->lock, flags);
425 return ret;
426}
427
428static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
429{
430 return hdr->ip_version >> 4;
431}
432
433static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
434{
435 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
436}
437
438static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
439{
440 return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
441}
442
443static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
444{
445 return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
446}
447
448static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
449{
450 struct in_device *in_dev = NULL;
451
452 if (ndev) {
453 rtnl_lock();
454 in_dev = __in_dev_get_rtnl(ndev);
455 if (in_dev) {
456 if (join)
457 ip_mc_inc_group(in_dev,
458 *(__be32 *)(mgid->raw + 12));
459 else
460 ip_mc_dec_group(in_dev,
461 *(__be32 *)(mgid->raw + 12));
462 }
463 rtnl_unlock();
464 }
465 return (in_dev) ? 0 : -ENODEV;
466}
467
468static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
469 struct id_table_entry *entry_b)
470{
471 struct rdma_id_private *id_priv = list_first_entry(
472 &entry_b->id_list, struct rdma_id_private, id_list_entry);
473 int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
474 struct sockaddr *sb = cma_dst_addr(id_priv);
475
476 if (ifindex_a != ifindex_b)
477 return (ifindex_a > ifindex_b) ? 1 : -1;
478
479 if (sa->sa_family != sb->sa_family)
480 return sa->sa_family - sb->sa_family;
481
482 if (sa->sa_family == AF_INET)
483 return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
484 (char *)&((struct sockaddr_in *)sb)->sin_addr,
485 sizeof(((struct sockaddr_in *)sa)->sin_addr));
486
487 return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
488 &((struct sockaddr_in6 *)sb)->sin6_addr);
489}
490
491static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
492{
493 struct rb_node **new, *parent = NULL;
494 struct id_table_entry *this, *node;
495 unsigned long flags;
496 int result;
497
498 node = kzalloc(sizeof(*node), GFP_KERNEL);
499 if (!node)
500 return -ENOMEM;
501
502 spin_lock_irqsave(&id_table_lock, flags);
503 new = &id_table.rb_node;
504 while (*new) {
505 this = container_of(*new, struct id_table_entry, rb_node);
506 result = compare_netdev_and_ip(
507 node_id_priv->id.route.addr.dev_addr.bound_dev_if,
508 cma_dst_addr(node_id_priv), this);
509
510 parent = *new;
511 if (result < 0)
512 new = &((*new)->rb_left);
513 else if (result > 0)
514 new = &((*new)->rb_right);
515 else {
516 list_add_tail(&node_id_priv->id_list_entry,
517 &this->id_list);
518 kfree(node);
519 goto unlock;
520 }
521 }
522
523 INIT_LIST_HEAD(&node->id_list);
524 list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
525
526 rb_link_node(&node->rb_node, parent, new);
527 rb_insert_color(&node->rb_node, &id_table);
528
529unlock:
530 spin_unlock_irqrestore(&id_table_lock, flags);
531 return 0;
532}
533
534static struct id_table_entry *
535node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
536{
537 struct rb_node *node = root->rb_node;
538 struct id_table_entry *data;
539 int result;
540
541 while (node) {
542 data = container_of(node, struct id_table_entry, rb_node);
543 result = compare_netdev_and_ip(ifindex, sa, data);
544 if (result < 0)
545 node = node->rb_left;
546 else if (result > 0)
547 node = node->rb_right;
548 else
549 return data;
550 }
551
552 return NULL;
553}
554
555static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
556{
557 struct id_table_entry *data;
558 unsigned long flags;
559
560 spin_lock_irqsave(&id_table_lock, flags);
561 if (list_empty(&id_priv->id_list_entry))
562 goto out;
563
564 data = node_from_ndev_ip(&id_table,
565 id_priv->id.route.addr.dev_addr.bound_dev_if,
566 cma_dst_addr(id_priv));
567 if (!data)
568 goto out;
569
570 list_del_init(&id_priv->id_list_entry);
571 if (list_empty(&data->id_list)) {
572 rb_erase(&data->rb_node, &id_table);
573 kfree(data);
574 }
575out:
576 spin_unlock_irqrestore(&id_table_lock, flags);
577}
578
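/*
 * Attach the id to a cma_device and take a device reference; callers hold
 * the global lock so the attach is serialized against device removal.
 */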
579static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
580 struct cma_device *cma_dev)
581{
582 cma_dev_get(cma_dev);
583 id_priv->cma_dev = cma_dev;
584 id_priv->id.device = cma_dev->device;
585 id_priv->id.route.addr.dev_addr.transport =
586 rdma_node_get_transport(cma_dev->device->node_type);
587 list_add_tail(&id_priv->device_item, &cma_dev->id_list);
588
589 trace_cm_id_attach(id_priv, cma_dev->device);
590}
591
592static void cma_attach_to_dev(struct rdma_id_private *id_priv,
593 struct cma_device *cma_dev)
594{
595 _cma_attach_to_dev(id_priv, cma_dev);
596 id_priv->gid_type =
597 cma_dev->default_gid_type[id_priv->id.port_num -
598 rdma_start_port(cma_dev->device)];
599}
600
601static void cma_release_dev(struct rdma_id_private *id_priv)
602{
603 mutex_lock(&lock);
604 list_del_init(&id_priv->device_item);
605 cma_dev_put(id_priv->cma_dev);
606 id_priv->cma_dev = NULL;
607 id_priv->id.device = NULL;
608 if (id_priv->id.route.addr.dev_addr.sgid_attr) {
609 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
610 id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
611 }
612 mutex_unlock(&lock);
613}
614
615static inline unsigned short cma_family(struct rdma_id_private *id_priv)
616{
617 return id_priv->id.route.addr.src_addr.ss_family;
618}
619
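/*
 * Set the qkey used for UD-style port spaces. A previously set qkey must
 * match any newly requested one. With no explicit qkey, RDMA_PS_UDP and
 * RDMA_PS_IB fall back to RDMA_UDP_QKEY, while RDMA_PS_IPOIB takes the qkey
 * from the broadcast group's mcmember record.
 */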
620static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
621{
622 struct ib_sa_mcmember_rec rec;
623 int ret = 0;
624
625 if (id_priv->qkey) {
626 if (qkey && id_priv->qkey != qkey)
627 return -EINVAL;
628 return 0;
629 }
630
631 if (qkey) {
632 id_priv->qkey = qkey;
633 return 0;
634 }
635
636 switch (id_priv->id.ps) {
637 case RDMA_PS_UDP:
638 case RDMA_PS_IB:
639 id_priv->qkey = RDMA_UDP_QKEY;
640 break;
641 case RDMA_PS_IPOIB:
642 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
643 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
644 id_priv->id.port_num, &rec.mgid,
645 &rec);
646 if (!ret)
647 id_priv->qkey = be32_to_cpu(rec.qkey);
648 break;
649 default:
650 break;
651 }
652 return ret;
653}
654
655static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
656{
657 dev_addr->dev_type = ARPHRD_INFINIBAND;
658 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
659 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
660}
661
662static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
663{
664 int ret;
665
666 if (addr->sa_family != AF_IB) {
667 ret = rdma_translate_ip(addr, dev_addr);
668 } else {
669 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
670 ret = 0;
671 }
672
673 return ret;
674}
675
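/*
 * Check that the given GID is actually present in the GID table of the
 * device and port, honouring the id's bound netdevice for RoCE ports and
 * the device's network namespace. Returns the matching gid attribute, which
 * the caller must eventually release, or an ERR_PTR on failure.
 */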
676static const struct ib_gid_attr *
677cma_validate_port(struct ib_device *device, u32 port,
678 enum ib_gid_type gid_type,
679 union ib_gid *gid,
680 struct rdma_id_private *id_priv)
681{
682 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
683 int bound_if_index = dev_addr->bound_dev_if;
684 const struct ib_gid_attr *sgid_attr;
685 int dev_type = dev_addr->dev_type;
686 struct net_device *ndev = NULL;
687
688 if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
689 return ERR_PTR(-ENODEV);
690
691 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
692 return ERR_PTR(-ENODEV);
693
694 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
695 return ERR_PTR(-ENODEV);
696
697 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
698 ndev = dev_get_by_index(dev_addr->net, bound_if_index);
699 if (!ndev)
700 return ERR_PTR(-ENODEV);
701 } else {
702 gid_type = IB_GID_TYPE_IB;
703 }
704
705 sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
706 if (ndev)
707 dev_put(ndev);
708 return sgid_attr;
709}
710
711static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
712 const struct ib_gid_attr *sgid_attr)
713{
714 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
715 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
716}
717
718/**
719 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
720 * based on source ip address.
721 * @id_priv: cm_id which should be bound to cma device
722 *
723 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
724 * based on source IP address. It returns 0 on success or an error code otherwise.
725 * It is applicable to both active and passive side cm_ids.
726 */
727static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
728{
729 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
730 const struct ib_gid_attr *sgid_attr;
731 union ib_gid gid, iboe_gid, *gidp;
732 struct cma_device *cma_dev;
733 enum ib_gid_type gid_type;
734 int ret = -ENODEV;
735 u32 port;
736
737 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
738 id_priv->id.ps == RDMA_PS_IPOIB)
739 return -EINVAL;
740
741 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
742 &iboe_gid);
743
744 memcpy(&gid, dev_addr->src_dev_addr +
745 rdma_addr_gid_offset(dev_addr), sizeof(gid));
746
747 mutex_lock(&lock);
748 list_for_each_entry(cma_dev, &dev_list, list) {
749 rdma_for_each_port (cma_dev->device, port) {
750 gidp = rdma_protocol_roce(cma_dev->device, port) ?
751 &iboe_gid : &gid;
752 gid_type = cma_dev->default_gid_type[port - 1];
753 sgid_attr = cma_validate_port(cma_dev->device, port,
754 gid_type, gidp, id_priv);
755 if (!IS_ERR(sgid_attr)) {
756 id_priv->id.port_num = port;
757 cma_bind_sgid_attr(id_priv, sgid_attr);
758 cma_attach_to_dev(id_priv, cma_dev);
759 ret = 0;
760 goto out;
761 }
762 }
763 }
764out:
765 mutex_unlock(&lock);
766 return ret;
767}
768
769/**
770 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
771 * @id_priv: cm id to bind to cma device
772 * @listen_id_priv: listener cm id to match against
773 * @req: Pointer to req structure containing incoming
774 * request information
775 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
776 * the rdma device of the listen_id matches the incoming request. It also verifies
777 * that a GID table entry is present for the source address.
778 * Returns 0 on success, or an error code otherwise.
779 */
780static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
781 const struct rdma_id_private *listen_id_priv,
782 struct cma_req_info *req)
783{
784 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
785 const struct ib_gid_attr *sgid_attr;
786 enum ib_gid_type gid_type;
787 union ib_gid gid;
788
789 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
790 id_priv->id.ps == RDMA_PS_IPOIB)
791 return -EINVAL;
792
793 if (rdma_protocol_roce(req->device, req->port))
794 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
795 &gid);
796 else
797 memcpy(&gid, dev_addr->src_dev_addr +
798 rdma_addr_gid_offset(dev_addr), sizeof(gid));
799
800 gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
801 sgid_attr = cma_validate_port(req->device, req->port,
802 gid_type, &gid, id_priv);
803 if (IS_ERR(sgid_attr))
804 return PTR_ERR(sgid_attr);
805
806 id_priv->id.port_num = req->port;
807 cma_bind_sgid_attr(id_priv, sgid_attr);
808 /* Need to acquire the lock to protect against readers
809 * of cma_dev->id_list, such as cma_netdev_callback() and
810 * cma_process_remove().
811 */
812 mutex_lock(&lock);
813 cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
814 mutex_unlock(&lock);
815 rdma_restrack_add(&id_priv->res);
816 return 0;
817}
818
819static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
820 const struct rdma_id_private *listen_id_priv)
821{
822 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
823 const struct ib_gid_attr *sgid_attr;
824 struct cma_device *cma_dev;
825 enum ib_gid_type gid_type;
826 int ret = -ENODEV;
827 union ib_gid gid;
828 u32 port;
829
830 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
831 id_priv->id.ps == RDMA_PS_IPOIB)
832 return -EINVAL;
833
834 memcpy(&gid, dev_addr->src_dev_addr +
835 rdma_addr_gid_offset(dev_addr), sizeof(gid));
836
837 mutex_lock(&lock);
838
839 cma_dev = listen_id_priv->cma_dev;
840 port = listen_id_priv->id.port_num;
841 gid_type = listen_id_priv->gid_type;
842 sgid_attr = cma_validate_port(cma_dev->device, port,
843 gid_type, &gid, id_priv);
844 if (!IS_ERR(sgid_attr)) {
845 id_priv->id.port_num = port;
846 cma_bind_sgid_attr(id_priv, sgid_attr);
847 ret = 0;
848 goto out;
849 }
850
851 list_for_each_entry(cma_dev, &dev_list, list) {
852 rdma_for_each_port (cma_dev->device, port) {
853 if (listen_id_priv->cma_dev == cma_dev &&
854 listen_id_priv->id.port_num == port)
855 continue;
856
857 gid_type = cma_dev->default_gid_type[port - 1];
858 sgid_attr = cma_validate_port(cma_dev->device, port,
859 gid_type, &gid, id_priv);
860 if (!IS_ERR(sgid_attr)) {
861 id_priv->id.port_num = port;
862 cma_bind_sgid_attr(id_priv, sgid_attr);
863 ret = 0;
864 goto out;
865 }
866 }
867 }
868
869out:
870 if (!ret) {
871 cma_attach_to_dev(id_priv, cma_dev);
872 rdma_restrack_add(&id_priv->res);
873 }
874
875 mutex_unlock(&lock);
876 return ret;
877}
878
879/*
880 * Select the source IB device and address to reach the destination IB address.
881 */
882static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
883{
884 struct cma_device *cma_dev, *cur_dev;
885 struct sockaddr_ib *addr;
886 union ib_gid gid, sgid, *dgid;
887 unsigned int p;
888 u16 pkey, index;
889 enum ib_port_state port_state;
890 int ret;
891 int i;
892
893 cma_dev = NULL;
894 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
895 dgid = (union ib_gid *) &addr->sib_addr;
896 pkey = ntohs(addr->sib_pkey);
897
898 mutex_lock(&lock);
899 list_for_each_entry(cur_dev, &dev_list, list) {
900 rdma_for_each_port (cur_dev->device, p) {
901 if (!rdma_cap_af_ib(cur_dev->device, p))
902 continue;
903
904 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
905 continue;
906
907 if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
908 continue;
909
910 for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
911 ++i) {
912 ret = rdma_query_gid(cur_dev->device, p, i,
913 &gid);
914 if (ret)
915 continue;
916
917 if (!memcmp(&gid, dgid, sizeof(gid))) {
918 cma_dev = cur_dev;
919 sgid = gid;
920 id_priv->id.port_num = p;
921 goto found;
922 }
923
924 if (!cma_dev && (gid.global.subnet_prefix ==
925 dgid->global.subnet_prefix) &&
926 port_state == IB_PORT_ACTIVE) {
927 cma_dev = cur_dev;
928 sgid = gid;
929 id_priv->id.port_num = p;
930 goto found;
931 }
932 }
933 }
934 }
935 mutex_unlock(&lock);
936 return -ENODEV;
937
938found:
939 cma_attach_to_dev(id_priv, cma_dev);
940 rdma_restrack_add(&id_priv->res);
941 mutex_unlock(&lock);
942 addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
943 memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
944 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
945 return 0;
946}
947
948static void cma_id_get(struct rdma_id_private *id_priv)
949{
950 refcount_inc(&id_priv->refcount);
951}
952
953static void cma_id_put(struct rdma_id_private *id_priv)
954{
955 if (refcount_dec_and_test(&id_priv->refcount))
956 complete(&id_priv->comp);
957}
958
959static struct rdma_id_private *
960__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
961 void *context, enum rdma_ucm_port_space ps,
962 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
963{
964 struct rdma_id_private *id_priv;
965
966 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
967 if (!id_priv)
968 return ERR_PTR(-ENOMEM);
969
970 id_priv->state = RDMA_CM_IDLE;
971 id_priv->id.context = context;
972 id_priv->id.event_handler = event_handler;
973 id_priv->id.ps = ps;
974 id_priv->id.qp_type = qp_type;
975 id_priv->tos_set = false;
976 id_priv->timeout_set = false;
977 id_priv->min_rnr_timer_set = false;
978 id_priv->gid_type = IB_GID_TYPE_IB;
979 spin_lock_init(&id_priv->lock);
980 mutex_init(&id_priv->qp_mutex);
981 init_completion(&id_priv->comp);
982 refcount_set(&id_priv->refcount, 1);
983 mutex_init(&id_priv->handler_mutex);
984 INIT_LIST_HEAD(&id_priv->device_item);
985 INIT_LIST_HEAD(&id_priv->id_list_entry);
986 INIT_LIST_HEAD(&id_priv->listen_list);
987 INIT_LIST_HEAD(&id_priv->mc_list);
988 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
989 id_priv->id.route.addr.dev_addr.net = get_net(net);
990 id_priv->seq_num &= 0x00ffffff;
991
992 rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
993 if (parent)
994 rdma_restrack_parent_name(&id_priv->res, &parent->res);
995
996 return id_priv;
997}
998
999struct rdma_cm_id *
1000__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
1001 void *context, enum rdma_ucm_port_space ps,
1002 enum ib_qp_type qp_type, const char *caller)
1003{
1004 struct rdma_id_private *ret;
1005
1006 ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
1007 if (IS_ERR(ret))
1008 return ERR_CAST(ret);
1009
1010 rdma_restrack_set_name(&ret->res, caller);
1011 return &ret->id;
1012}
1013EXPORT_SYMBOL(__rdma_create_kernel_id);
1014
1015struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
1016 void *context,
1017 enum rdma_ucm_port_space ps,
1018 enum ib_qp_type qp_type)
1019{
1020 struct rdma_id_private *ret;
1021
1022 ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
1023 ps, qp_type, NULL);
1024 if (IS_ERR(ret))
1025 return ERR_CAST(ret);
1026
1027 rdma_restrack_set_name(&ret->res, NULL);
1028 return &ret->id;
1029}
1030EXPORT_SYMBOL(rdma_create_user_id);
1031
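/*
 * Bring a UD QP all the way to RTS: INIT with the attributes derived from
 * the cm_id, then RTR, then RTS with a zero starting send PSN.
 */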
1032static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1033{
1034 struct ib_qp_attr qp_attr;
1035 int qp_attr_mask, ret;
1036
1037 qp_attr.qp_state = IB_QPS_INIT;
1038 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1039 if (ret)
1040 return ret;
1041
1042 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1043 if (ret)
1044 return ret;
1045
1046 qp_attr.qp_state = IB_QPS_RTR;
1047 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
1048 if (ret)
1049 return ret;
1050
1051 qp_attr.qp_state = IB_QPS_RTS;
1052 qp_attr.sq_psn = 0;
1053 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
1054
1055 return ret;
1056}
1057
1058static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1059{
1060 struct ib_qp_attr qp_attr;
1061 int qp_attr_mask, ret;
1062
1063 qp_attr.qp_state = IB_QPS_INIT;
1064 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1065 if (ret)
1066 return ret;
1067
1068 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1069}
1070
1071int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
1072 struct ib_qp_init_attr *qp_init_attr)
1073{
1074 struct rdma_id_private *id_priv;
1075 struct ib_qp *qp;
1076 int ret;
1077
1078 id_priv = container_of(id, struct rdma_id_private, id);
1079 if (id->device != pd->device) {
1080 ret = -EINVAL;
1081 goto out_err;
1082 }
1083
1084 qp_init_attr->port_num = id->port_num;
1085 qp = ib_create_qp(pd, qp_init_attr);
1086 if (IS_ERR(qp)) {
1087 ret = PTR_ERR(qp);
1088 goto out_err;
1089 }
1090
1091 if (id->qp_type == IB_QPT_UD)
1092 ret = cma_init_ud_qp(id_priv, qp);
1093 else
1094 ret = cma_init_conn_qp(id_priv, qp);
1095 if (ret)
1096 goto out_destroy;
1097
1098 id->qp = qp;
1099 id_priv->qp_num = qp->qp_num;
1100 id_priv->srq = (qp->srq != NULL);
1101 trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
1102 return 0;
1103out_destroy:
1104 ib_destroy_qp(qp);
1105out_err:
1106 trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
1107 return ret;
1108}
1109EXPORT_SYMBOL(rdma_create_qp);
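/*
 * A rough sketch of how a kernel ULP might use this once the cm_id has a
 * device (i.e. after address and route resolution); "cq" and "pd" are
 * assumed to already exist and are not part of this file:
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.cap		= { .max_send_wr = 16, .max_recv_wr = 16,
 *				    .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *
 *	ret = rdma_create_qp(cm_id, pd, &qp_attr);
 *
 * followed on the active side by rdma_connect().
 */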
1110
1111void rdma_destroy_qp(struct rdma_cm_id *id)
1112{
1113 struct rdma_id_private *id_priv;
1114
1115 id_priv = container_of(id, struct rdma_id_private, id);
1116 trace_cm_qp_destroy(id_priv);
1117 mutex_lock(&id_priv->qp_mutex);
1118 ib_destroy_qp(id_priv->id.qp);
1119 id_priv->id.qp = NULL;
1120 mutex_unlock(&id_priv->qp_mutex);
1121}
1122EXPORT_SYMBOL(rdma_destroy_qp);
1123
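/*
 * Move the id's QP through INIT to RTR using attributes filled in by
 * rdma_init_qp_attr(); conn_param, when provided, overrides the responder
 * resources. Having no QP attached is not an error, since users may manage
 * the QP state themselves.
 */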
1124static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
1125 struct rdma_conn_param *conn_param)
1126{
1127 struct ib_qp_attr qp_attr;
1128 int qp_attr_mask, ret;
1129
1130 mutex_lock(&id_priv->qp_mutex);
1131 if (!id_priv->id.qp) {
1132 ret = 0;
1133 goto out;
1134 }
1135
1136 /* Need to update QP attributes from default values. */
1137 qp_attr.qp_state = IB_QPS_INIT;
1138 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1139 if (ret)
1140 goto out;
1141
1142 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1143 if (ret)
1144 goto out;
1145
1146 qp_attr.qp_state = IB_QPS_RTR;
1147 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1148 if (ret)
1149 goto out;
1150
1151 BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1152
1153 if (conn_param)
1154 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
1155 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1156out:
1157 mutex_unlock(&id_priv->qp_mutex);
1158 return ret;
1159}
1160
1161static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
1162 struct rdma_conn_param *conn_param)
1163{
1164 struct ib_qp_attr qp_attr;
1165 int qp_attr_mask, ret;
1166
1167 mutex_lock(&id_priv->qp_mutex);
1168 if (!id_priv->id.qp) {
1169 ret = 0;
1170 goto out;
1171 }
1172
1173 qp_attr.qp_state = IB_QPS_RTS;
1174 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1175 if (ret)
1176 goto out;
1177
1178 if (conn_param)
1179 qp_attr.max_rd_atomic = conn_param->initiator_depth;
1180 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1181out:
1182 mutex_unlock(&id_priv->qp_mutex);
1183 return ret;
1184}
1185
1186static int cma_modify_qp_err(struct rdma_id_private *id_priv)
1187{
1188 struct ib_qp_attr qp_attr;
1189 int ret;
1190
1191 mutex_lock(&id_priv->qp_mutex);
1192 if (!id_priv->id.qp) {
1193 ret = 0;
1194 goto out;
1195 }
1196
1197 qp_attr.qp_state = IB_QPS_ERR;
1198 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
1199out:
1200 mutex_unlock(&id_priv->qp_mutex);
1201 return ret;
1202}
1203
1204static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
1205 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1206{
1207 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
1208 int ret;
1209 u16 pkey;
1210
1211 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
1212 pkey = 0xffff;
1213 else
1214 pkey = ib_addr_get_pkey(dev_addr);
1215
1216 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
1217 pkey, &qp_attr->pkey_index);
1218 if (ret)
1219 return ret;
1220
1221 qp_attr->port_num = id_priv->id.port_num;
1222 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
1223
1224 if (id_priv->id.qp_type == IB_QPT_UD) {
1225 ret = cma_set_qkey(id_priv, 0);
1226 if (ret)
1227 return ret;
1228
1229 qp_attr->qkey = id_priv->qkey;
1230 *qp_attr_mask |= IB_QP_QKEY;
1231 } else {
1232 qp_attr->qp_access_flags = 0;
1233 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
1234 }
1235 return 0;
1236}
1237
1238int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
1239 int *qp_attr_mask)
1240{
1241 struct rdma_id_private *id_priv;
1242 int ret = 0;
1243
1244 id_priv = container_of(id, struct rdma_id_private, id);
1245 if (rdma_cap_ib_cm(id->device, id->port_num)) {
1246 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
1247 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
1248 else
1249 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
1250 qp_attr_mask);
1251
1252 if (qp_attr->qp_state == IB_QPS_RTR)
1253 qp_attr->rq_psn = id_priv->seq_num;
1254 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1255 if (!id_priv->cm_id.iw) {
1256 qp_attr->qp_access_flags = 0;
1257 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1258 } else
1259 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
1260 qp_attr_mask);
1261 qp_attr->port_num = id_priv->id.port_num;
1262 *qp_attr_mask |= IB_QP_PORT;
1263 } else {
1264 ret = -ENOSYS;
1265 }
1266
1267 if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
1268 qp_attr->timeout = id_priv->timeout;
1269
1270 if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
1271 qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
1272
1273 return ret;
1274}
1275EXPORT_SYMBOL(rdma_init_qp_attr);
1276
1277static inline bool cma_zero_addr(const struct sockaddr *addr)
1278{
1279 switch (addr->sa_family) {
1280 case AF_INET:
1281 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
1282 case AF_INET6:
1283 return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
1284 case AF_IB:
1285 return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
1286 default:
1287 return false;
1288 }
1289}
1290
1291static inline bool cma_loopback_addr(const struct sockaddr *addr)
1292{
1293 switch (addr->sa_family) {
1294 case AF_INET:
1295 return ipv4_is_loopback(
1296 ((struct sockaddr_in *)addr)->sin_addr.s_addr);
1297 case AF_INET6:
1298 return ipv6_addr_loopback(
1299 &((struct sockaddr_in6 *)addr)->sin6_addr);
1300 case AF_IB:
1301 return ib_addr_loopback(
1302 &((struct sockaddr_ib *)addr)->sib_addr);
1303 default:
1304 return false;
1305 }
1306}
1307
1308static inline bool cma_any_addr(const struct sockaddr *addr)
1309{
1310 return cma_zero_addr(addr) || cma_loopback_addr(addr);
1311}
1312
1313static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
1314{
1315 if (src->sa_family != dst->sa_family)
1316 return -1;
1317
1318 switch (src->sa_family) {
1319 case AF_INET:
1320 return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
1321 ((struct sockaddr_in *)dst)->sin_addr.s_addr;
1322 case AF_INET6: {
1323 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
1324 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
1325 bool link_local;
1326
1327 if (ipv6_addr_cmp(&src_addr6->sin6_addr,
1328 &dst_addr6->sin6_addr))
1329 return 1;
1330 link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
1331 IPV6_ADDR_LINKLOCAL;
1332 /* Link local must match their scope_ids */
1333 return link_local ? (src_addr6->sin6_scope_id !=
1334 dst_addr6->sin6_scope_id) :
1335 0;
1336 }
1337
1338 default:
1339 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
1340 &((struct sockaddr_ib *) dst)->sib_addr);
1341 }
1342}
1343
1344static __be16 cma_port(const struct sockaddr *addr)
1345{
1346 struct sockaddr_ib *sib;
1347
1348 switch (addr->sa_family) {
1349 case AF_INET:
1350 return ((struct sockaddr_in *) addr)->sin_port;
1351 case AF_INET6:
1352 return ((struct sockaddr_in6 *) addr)->sin6_port;
1353 case AF_IB:
1354 sib = (struct sockaddr_ib *) addr;
1355 return htons((u16) (be64_to_cpu(sib->sib_sid) &
1356 be64_to_cpu(sib->sib_sid_mask)));
1357 default:
1358 return 0;
1359 }
1360}
1361
1362static inline int cma_any_port(const struct sockaddr *addr)
1363{
1364 return !cma_port(addr);
1365}
1366
1367static void cma_save_ib_info(struct sockaddr *src_addr,
1368 struct sockaddr *dst_addr,
1369 const struct rdma_cm_id *listen_id,
1370 const struct sa_path_rec *path)
1371{
1372 struct sockaddr_ib *listen_ib, *ib;
1373
1374 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
1375 if (src_addr) {
1376 ib = (struct sockaddr_ib *)src_addr;
1377 ib->sib_family = AF_IB;
1378 if (path) {
1379 ib->sib_pkey = path->pkey;
1380 ib->sib_flowinfo = path->flow_label;
1381 memcpy(&ib->sib_addr, &path->sgid, 16);
1382 ib->sib_sid = path->service_id;
1383 ib->sib_scope_id = 0;
1384 } else {
1385 ib->sib_pkey = listen_ib->sib_pkey;
1386 ib->sib_flowinfo = listen_ib->sib_flowinfo;
1387 ib->sib_addr = listen_ib->sib_addr;
1388 ib->sib_sid = listen_ib->sib_sid;
1389 ib->sib_scope_id = listen_ib->sib_scope_id;
1390 }
1391 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
1392 }
1393 if (dst_addr) {
1394 ib = (struct sockaddr_ib *)dst_addr;
1395 ib->sib_family = AF_IB;
1396 if (path) {
1397 ib->sib_pkey = path->pkey;
1398 ib->sib_flowinfo = path->flow_label;
1399 memcpy(&ib->sib_addr, &path->dgid, 16);
1400 }
1401 }
1402}
1403
1404static void cma_save_ip4_info(struct sockaddr_in *src_addr,
1405 struct sockaddr_in *dst_addr,
1406 struct cma_hdr *hdr,
1407 __be16 local_port)
1408{
1409 if (src_addr) {
1410 *src_addr = (struct sockaddr_in) {
1411 .sin_family = AF_INET,
1412 .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
1413 .sin_port = local_port,
1414 };
1415 }
1416
1417 if (dst_addr) {
1418 *dst_addr = (struct sockaddr_in) {
1419 .sin_family = AF_INET,
1420 .sin_addr.s_addr = hdr->src_addr.ip4.addr,
1421 .sin_port = hdr->port,
1422 };
1423 }
1424}
1425
1426static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
1427 struct sockaddr_in6 *dst_addr,
1428 struct cma_hdr *hdr,
1429 __be16 local_port)
1430{
1431 if (src_addr) {
1432 *src_addr = (struct sockaddr_in6) {
1433 .sin6_family = AF_INET6,
1434 .sin6_addr = hdr->dst_addr.ip6,
1435 .sin6_port = local_port,
1436 };
1437 }
1438
1439 if (dst_addr) {
1440 *dst_addr = (struct sockaddr_in6) {
1441 .sin6_family = AF_INET6,
1442 .sin6_addr = hdr->src_addr.ip6,
1443 .sin6_port = hdr->port,
1444 };
1445 }
1446}
1447
1448static u16 cma_port_from_service_id(__be64 service_id)
1449{
1450 return (u16)be64_to_cpu(service_id);
1451}
1452
1453static int cma_save_ip_info(struct sockaddr *src_addr,
1454 struct sockaddr *dst_addr,
1455 const struct ib_cm_event *ib_event,
1456 __be64 service_id)
1457{
1458 struct cma_hdr *hdr;
1459 __be16 port;
1460
1461 hdr = ib_event->private_data;
1462 if (hdr->cma_version != CMA_VERSION)
1463 return -EINVAL;
1464
1465 port = htons(cma_port_from_service_id(service_id));
1466
1467 switch (cma_get_ip_ver(hdr)) {
1468 case 4:
1469 cma_save_ip4_info((struct sockaddr_in *)src_addr,
1470 (struct sockaddr_in *)dst_addr, hdr, port);
1471 break;
1472 case 6:
1473 cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
1474 (struct sockaddr_in6 *)dst_addr, hdr, port);
1475 break;
1476 default:
1477 return -EAFNOSUPPORT;
1478 }
1479
1480 return 0;
1481}
1482
1483static int cma_save_net_info(struct sockaddr *src_addr,
1484 struct sockaddr *dst_addr,
1485 const struct rdma_cm_id *listen_id,
1486 const struct ib_cm_event *ib_event,
1487 sa_family_t sa_family, __be64 service_id)
1488{
1489 if (sa_family == AF_IB) {
1490 if (ib_event->event == IB_CM_REQ_RECEIVED)
1491 cma_save_ib_info(src_addr, dst_addr, listen_id,
1492 ib_event->param.req_rcvd.primary_path);
1493 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1494 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
1495 return 0;
1496 }
1497
1498 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
1499}
1500
1501static int cma_save_req_info(const struct ib_cm_event *ib_event,
1502 struct cma_req_info *req)
1503{
1504 const struct ib_cm_req_event_param *req_param =
1505 &ib_event->param.req_rcvd;
1506 const struct ib_cm_sidr_req_event_param *sidr_param =
1507 &ib_event->param.sidr_req_rcvd;
1508
1509 switch (ib_event->event) {
1510 case IB_CM_REQ_RECEIVED:
1511 req->device = req_param->listen_id->device;
1512 req->port = req_param->port;
1513 memcpy(&req->local_gid, &req_param->primary_path->sgid,
1514 sizeof(req->local_gid));
1515 req->has_gid = true;
1516 req->service_id = req_param->primary_path->service_id;
1517 req->pkey = be16_to_cpu(req_param->primary_path->pkey);
1518 if (req->pkey != req_param->bth_pkey)
1519 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
1520 "RDMA CMA: in the future this may cause the request to be dropped\n",
1521 req_param->bth_pkey, req->pkey);
1522 break;
1523 case IB_CM_SIDR_REQ_RECEIVED:
1524 req->device = sidr_param->listen_id->device;
1525 req->port = sidr_param->port;
1526 req->has_gid = false;
1527 req->service_id = sidr_param->service_id;
1528 req->pkey = sidr_param->pkey;
1529 if (req->pkey != sidr_param->bth_pkey)
1530 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
1531 "RDMA CMA: in the future this may cause the request to be dropped\n",
1532 sidr_param->bth_pkey, req->pkey);
1533 break;
1534 default:
1535 return -EINVAL;
1536 }
1537
1538 return 0;
1539}
1540
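/*
 * Confirm that an incoming request's (saddr, daddr) pair is really routed
 * through this netdevice by doing a fib lookup with the device as the
 * output interface; multicast, broadcast, zeronet and loopback addresses
 * are rejected outright.
 */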
1541static bool validate_ipv4_net_dev(struct net_device *net_dev,
1542 const struct sockaddr_in *dst_addr,
1543 const struct sockaddr_in *src_addr)
1544{
1545 __be32 daddr = dst_addr->sin_addr.s_addr,
1546 saddr = src_addr->sin_addr.s_addr;
1547 struct fib_result res;
1548 struct flowi4 fl4;
1549 int err;
1550 bool ret;
1551
1552 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1553 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
1554 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
1555 ipv4_is_loopback(saddr))
1556 return false;
1557
1558 memset(&fl4, 0, sizeof(fl4));
1559 fl4.flowi4_oif = net_dev->ifindex;
1560 fl4.daddr = daddr;
1561 fl4.saddr = saddr;
1562
1563 rcu_read_lock();
1564 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1565 ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1566 rcu_read_unlock();
1567
1568 return ret;
1569}
1570
1571static bool validate_ipv6_net_dev(struct net_device *net_dev,
1572 const struct sockaddr_in6 *dst_addr,
1573 const struct sockaddr_in6 *src_addr)
1574{
1575#if IS_ENABLED(CONFIG_IPV6)
1576 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
1577 IPV6_ADDR_LINKLOCAL;
1578 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
1579 &src_addr->sin6_addr, net_dev->ifindex,
1580 NULL, strict);
1581 bool ret;
1582
1583 if (!rt)
1584 return false;
1585
1586 ret = rt->rt6i_idev->dev == net_dev;
1587 ip6_rt_put(rt);
1588
1589 return ret;
1590#else
1591 return false;
1592#endif
1593}
1594
1595static bool validate_net_dev(struct net_device *net_dev,
1596 const struct sockaddr *daddr,
1597 const struct sockaddr *saddr)
1598{
1599 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
1600 const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
1601 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1602 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
1603
1604 switch (daddr->sa_family) {
1605 case AF_INET:
1606 return saddr->sa_family == AF_INET &&
1607 validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1608
1609 case AF_INET6:
1610 return saddr->sa_family == AF_INET6 &&
1611 validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1612
1613 default:
1614 return false;
1615 }
1616}
1617
1618static struct net_device *
1619roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
1620{
1621 const struct ib_gid_attr *sgid_attr = NULL;
1622 struct net_device *ndev;
1623
1624 if (ib_event->event == IB_CM_REQ_RECEIVED)
1625 sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
1626 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1627 sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
1628
1629 if (!sgid_attr)
1630 return NULL;
1631
1632 rcu_read_lock();
1633 ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
1634 if (IS_ERR(ndev))
1635 ndev = NULL;
1636 else
1637 dev_hold(ndev);
1638 rcu_read_unlock();
1639 return ndev;
1640}
1641
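/*
 * Resolve the netdevice an incoming CM request arrived on: for RoCE it is
 * taken from the request's SGID attribute, otherwise it is looked up from
 * the device, port, pkey, GID and listen address. May return an ERR_PTR.
 */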
1642static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
1643 struct cma_req_info *req)
1644{
1645 struct sockaddr *listen_addr =
1646 (struct sockaddr *)&req->listen_addr_storage;
1647 struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
1648 struct net_device *net_dev;
1649 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
1650 int err;
1651
1652 err = cma_save_ip_info(listen_addr, src_addr, ib_event,
1653 req->service_id);
1654 if (err)
1655 return ERR_PTR(err);
1656
1657 if (rdma_protocol_roce(req->device, req->port))
1658 net_dev = roce_get_net_dev_by_cm_event(ib_event);
1659 else
1660 net_dev = ib_get_net_dev_by_params(req->device, req->port,
1661 req->pkey,
1662 gid, listen_addr);
1663 if (!net_dev)
1664 return ERR_PTR(-ENODEV);
1665
1666 return net_dev;
1667}
1668
1669static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
1670{
1671 return (be64_to_cpu(service_id) >> 16) & 0xffff;
1672}
1673
1674static bool cma_match_private_data(struct rdma_id_private *id_priv,
1675 const struct cma_hdr *hdr)
1676{
1677 struct sockaddr *addr = cma_src_addr(id_priv);
1678 __be32 ip4_addr;
1679 struct in6_addr ip6_addr;
1680
1681 if (cma_any_addr(addr) && !id_priv->afonly)
1682 return true;
1683
1684 switch (addr->sa_family) {
1685 case AF_INET:
1686 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
1687 if (cma_get_ip_ver(hdr) != 4)
1688 return false;
1689 if (!cma_any_addr(addr) &&
1690 hdr->dst_addr.ip4.addr != ip4_addr)
1691 return false;
1692 break;
1693 case AF_INET6:
1694 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
1695 if (cma_get_ip_ver(hdr) != 6)
1696 return false;
1697 if (!cma_any_addr(addr) &&
1698 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
1699 return false;
1700 break;
1701 case AF_IB:
1702 return true;
1703 default:
1704 return false;
1705 }
1706
1707 return true;
1708}
1709
1710static bool cma_protocol_roce(const struct rdma_cm_id *id)
1711{
1712 struct ib_device *device = id->device;
1713 const u32 port_num = id->port_num ?: rdma_start_port(device);
1714
1715 return rdma_protocol_roce(device, port_num);
1716}
1717
1718static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
1719{
1720 const struct sockaddr *daddr =
1721 (const struct sockaddr *)&req->listen_addr_storage;
1722 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1723
1724 /* Returns true if the req is for IPv6 link local */
1725 return (daddr->sa_family == AF_INET6 &&
1726 (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
1727}
1728
1729static bool cma_match_net_dev(const struct rdma_cm_id *id,
1730 const struct net_device *net_dev,
1731 const struct cma_req_info *req)
1732{
1733 const struct rdma_addr *addr = &id->route.addr;
1734
1735 if (!net_dev)
1736 /* This request is an AF_IB request */
1737 return (!id->port_num || id->port_num == req->port) &&
1738 (addr->src_addr.ss_family == AF_IB);
1739
1740 /*
1741 * If the request is not for an IPv6 link-local address, allow the
1742 * request to match any netdevice of the single- or multi-port rdma device.
1743 */
1744 if (!cma_is_req_ipv6_ll(req))
1745 return true;
1746 /*
1747 * Net namespaces must match, and if the listener is listening
1748 * on a specific netdevice then that netdevice must match as well.
1749 */
1750 if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1751 (!!addr->dev_addr.bound_dev_if ==
1752 (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1753 return true;
1754 else
1755 return false;
1756}
1757
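/*
 * Walk the owners of the bind list for the request's port and return the
 * listener whose private data, device and netdevice match the incoming
 * request, checking both wildcard listeners and their per-device children.
 */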
1758static struct rdma_id_private *cma_find_listener(
1759 const struct rdma_bind_list *bind_list,
1760 const struct ib_cm_id *cm_id,
1761 const struct ib_cm_event *ib_event,
1762 const struct cma_req_info *req,
1763 const struct net_device *net_dev)
1764{
1765 struct rdma_id_private *id_priv, *id_priv_dev;
1766
1767 lockdep_assert_held(&lock);
1768
1769 if (!bind_list)
1770 return ERR_PTR(-EINVAL);
1771
1772 hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1773 if (cma_match_private_data(id_priv, ib_event->private_data)) {
1774 if (id_priv->id.device == cm_id->device &&
1775 cma_match_net_dev(&id_priv->id, net_dev, req))
1776 return id_priv;
1777 list_for_each_entry(id_priv_dev,
1778 &id_priv->listen_list,
1779 listen_item) {
1780 if (id_priv_dev->id.device == cm_id->device &&
1781 cma_match_net_dev(&id_priv_dev->id,
1782 net_dev, req))
1783 return id_priv_dev;
1784 }
1785 }
1786 }
1787
1788 return ERR_PTR(-EINVAL);
1789}
1790
1791static struct rdma_id_private *
1792cma_ib_id_from_event(struct ib_cm_id *cm_id,
1793 const struct ib_cm_event *ib_event,
1794 struct cma_req_info *req,
1795 struct net_device **net_dev)
1796{
1797 struct rdma_bind_list *bind_list;
1798 struct rdma_id_private *id_priv;
1799 int err;
1800
1801 err = cma_save_req_info(ib_event, req);
1802 if (err)
1803 return ERR_PTR(err);
1804
1805 *net_dev = cma_get_net_dev(ib_event, req);
1806 if (IS_ERR(*net_dev)) {
1807 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
1808 /* Assuming the protocol is AF_IB */
1809 *net_dev = NULL;
1810 } else {
1811 return ERR_CAST(*net_dev);
1812 }
1813 }
1814
1815 mutex_lock(&lock);
1816 /*
1817 * The net namespace might be deleted while the route lookup and
1818 * cm_id lookup are in progress. Therefore, perform the netdevice
1819 * validation and the cm_id lookup under the RCU lock.
1820 * The RCU lock, together with the netdevice state check, synchronizes
1821 * with the netdevice migrating to a different net namespace and also
1822 * ensures the net namespace is not deleted while the lookup is in
1823 * progress.
1824 * If the device state is not IFF_UP, its properties such as ifindex
1825 * and nd_net cannot be trusted to remain valid without the rcu lock.
1826 * net/core/dev.c change_net_namespace() synchronizes with ongoing
1827 * operations on the net device after the device is closed, using
1828 * synchronize_net().
1829 */
1830 rcu_read_lock();
1831 if (*net_dev) {
1832 /*
1833 * If the netdevice is down, it is likely administratively down or
1834 * migrating to a different namespace.
1835 * In that case avoid further processing, as the net namespace
1836 * or ifindex may change.
1837 */
1838 if (((*net_dev)->flags & IFF_UP) == 0) {
1839 id_priv = ERR_PTR(-EHOSTUNREACH);
1840 goto err;
1841 }
1842
1843 if (!validate_net_dev(*net_dev,
1844 (struct sockaddr *)&req->src_addr_storage,
1845 (struct sockaddr *)&req->listen_addr_storage)) {
1846 id_priv = ERR_PTR(-EHOSTUNREACH);
1847 goto err;
1848 }
1849 }
1850
1851 bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
1852 rdma_ps_from_service_id(req->service_id),
1853 cma_port_from_service_id(req->service_id));
1854 id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
1855err:
1856 rcu_read_unlock();
1857 mutex_unlock(&lock);
1858 if (IS_ERR(id_priv) && *net_dev) {
1859 dev_put(*net_dev);
1860 *net_dev = NULL;
1861 }
1862 return id_priv;
1863}
1864
1865static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
1866{
1867 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1868}
1869
1870static void cma_cancel_route(struct rdma_id_private *id_priv)
1871{
1872 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1873 if (id_priv->query)
1874 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
1875 }
1876}
1877
1878static void _cma_cancel_listens(struct rdma_id_private *id_priv)
1879{
1880 struct rdma_id_private *dev_id_priv;
1881
1882 lockdep_assert_held(&lock);
1883
1884 /*
1885 * Remove from listen_any_list to prevent added devices from spawning
1886 * additional listen requests.
1887 */
1888 list_del_init(&id_priv->listen_any_item);
1889
1890 while (!list_empty(&id_priv->listen_list)) {
1891 dev_id_priv =
1892 list_first_entry(&id_priv->listen_list,
1893 struct rdma_id_private, listen_item);
1894 /* sync with device removal to avoid duplicate destruction */
1895 list_del_init(&dev_id_priv->device_item);
1896 list_del_init(&dev_id_priv->listen_item);
1897 mutex_unlock(&lock);
1898
1899 rdma_destroy_id(&dev_id_priv->id);
1900 mutex_lock(&lock);
1901 }
1902}
1903
1904static void cma_cancel_listens(struct rdma_id_private *id_priv)
1905{
1906 mutex_lock(&lock);
1907 _cma_cancel_listens(id_priv);
1908 mutex_unlock(&lock);
1909}
1910
1911static void cma_cancel_operation(struct rdma_id_private *id_priv,
1912 enum rdma_cm_state state)
1913{
1914 switch (state) {
1915 case RDMA_CM_ADDR_QUERY:
1916 /*
1917 * We can avoid doing the rdma_addr_cancel() based on state;
1918 * only RDMA_CM_ADDR_QUERY has work that could still execute.
1919 * Notice that the addr_handler work could still be exiting
1920 * outside this state; however, due to the interaction with the
1921 * handler_mutex, the work is guaranteed not to touch id_priv
1922 * during exit.
1923 */
1924 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1925 break;
1926 case RDMA_CM_ROUTE_QUERY:
1927 cma_cancel_route(id_priv);
1928 break;
1929 case RDMA_CM_LISTEN:
1930 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1931 cma_cancel_listens(id_priv);
1932 break;
1933 default:
1934 break;
1935 }
1936}
1937
1938static void cma_release_port(struct rdma_id_private *id_priv)
1939{
1940 struct rdma_bind_list *bind_list = id_priv->bind_list;
1941 struct net *net = id_priv->id.route.addr.dev_addr.net;
1942
1943 if (!bind_list)
1944 return;
1945
1946 mutex_lock(&lock);
1947 hlist_del(&id_priv->node);
1948 if (hlist_empty(&bind_list->owners)) {
1949 cma_ps_remove(net, bind_list->ps, bind_list->port);
1950 kfree(bind_list);
1951 }
1952 mutex_unlock(&lock);
1953}
1954
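/*
 * Tear down one multicast join: release the SA multicast for IB ports and,
 * for RoCE, send an IGMP leave on the bound netdevice unless the join was
 * send-only, then cancel any pending iboe join work.
 */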
1955static void destroy_mc(struct rdma_id_private *id_priv,
1956 struct cma_multicast *mc)
1957{
1958 bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
1959
1960 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
1961 ib_sa_free_multicast(mc->sa_mc);
1962
1963 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
1964 struct rdma_dev_addr *dev_addr =
1965 &id_priv->id.route.addr.dev_addr;
1966 struct net_device *ndev = NULL;
1967
1968 if (dev_addr->bound_dev_if)
1969 ndev = dev_get_by_index(dev_addr->net,
1970 dev_addr->bound_dev_if);
1971 if (ndev && !send_only) {
1972 enum ib_gid_type gid_type;
1973 union ib_gid mgid;
1974
1975 gid_type = id_priv->cma_dev->default_gid_type
1976 [id_priv->id.port_num -
1977 rdma_start_port(
1978 id_priv->cma_dev->device)];
1979 cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
1980 gid_type);
1981 cma_igmp_send(ndev, &mgid, false);
1982 }
1983 dev_put(ndev);
1984
1985 cancel_work_sync(&mc->iboe_join.work);
1986 }
1987 kfree(mc);
1988}
1989
1990static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
1991{
1992 struct cma_multicast *mc;
1993
1994 while (!list_empty(&id_priv->mc_list)) {
1995 mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
1996 list);
1997 list_del(&mc->list);
1998 destroy_mc(id_priv, mc);
1999 }
2000}
2001
2002static void _destroy_id(struct rdma_id_private *id_priv,
2003 enum rdma_cm_state state)
2004{
2005 cma_cancel_operation(id_priv, state);
2006
2007 rdma_restrack_del(&id_priv->res);
2008 cma_remove_id_from_tree(id_priv);
2009 if (id_priv->cma_dev) {
2010 if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
2011 if (id_priv->cm_id.ib)
2012 ib_destroy_cm_id(id_priv->cm_id.ib);
2013 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
2014 if (id_priv->cm_id.iw)
2015 iw_destroy_cm_id(id_priv->cm_id.iw);
2016 }
2017 cma_leave_mc_groups(id_priv);
2018 cma_release_dev(id_priv);
2019 }
2020
2021 cma_release_port(id_priv);
2022 cma_id_put(id_priv);
2023 wait_for_completion(&id_priv->comp);
2024
2025 if (id_priv->internal_id)
2026 cma_id_put(id_priv->id.context);
2027
2028 kfree(id_priv->id.route.path_rec);
2029 kfree(id_priv->id.route.path_rec_inbound);
2030 kfree(id_priv->id.route.path_rec_outbound);
2031
2032 put_net(id_priv->id.route.addr.dev_addr.net);
2033 kfree(id_priv);
2034}
2035
2036/*
2037 * destroy an ID from within the handler_mutex. This ensures that no other
2038 * handlers can start running concurrently.
2039 */
2040static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
2041 __releases(&id_priv->handler_mutex)
2042{
2043 enum rdma_cm_state state;
2044 unsigned long flags;
2045
2046 trace_cm_id_destroy(id_priv);
2047
2048 /*
2049 * Setting the state to destroyed under the handler mutex provides a
2050 * fence against calling handler callbacks. If this is invoked due to
2051 * the failure of a handler callback then it guarantees that no future
2052 * handlers will be called.
2053 */
2054 lockdep_assert_held(&id_priv->handler_mutex);
2055 spin_lock_irqsave(&id_priv->lock, flags);
2056 state = id_priv->state;
2057 id_priv->state = RDMA_CM_DESTROYING;
2058 spin_unlock_irqrestore(&id_priv->lock, flags);
2059 mutex_unlock(&id_priv->handler_mutex);
2060 _destroy_id(id_priv, state);
2061}
2062
2063void rdma_destroy_id(struct rdma_cm_id *id)
2064{
2065 struct rdma_id_private *id_priv =
2066 container_of(id, struct rdma_id_private, id);
2067
2068 mutex_lock(&id_priv->handler_mutex);
2069 destroy_id_handler_unlock(id_priv);
2070}
2071EXPORT_SYMBOL(rdma_destroy_id);
2072
2073static int cma_rep_recv(struct rdma_id_private *id_priv)
2074{
2075 int ret;
2076
2077 ret = cma_modify_qp_rtr(id_priv, NULL);
2078 if (ret)
2079 goto reject;
2080
2081 ret = cma_modify_qp_rts(id_priv, NULL);
2082 if (ret)
2083 goto reject;
2084
2085 trace_cm_send_rtu(id_priv);
2086 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
2087 if (ret)
2088 goto reject;
2089
2090 return 0;
2091reject:
2092 pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
2093 cma_modify_qp_err(id_priv);
2094 trace_cm_send_rej(id_priv);
2095 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
2096 NULL, 0, NULL, 0);
2097 return ret;
2098}
2099
2100static void cma_set_rep_event_data(struct rdma_cm_event *event,
2101 const struct ib_cm_rep_event_param *rep_data,
2102 void *private_data)
2103{
2104 event->param.conn.private_data = private_data;
2105 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
2106 event->param.conn.responder_resources = rep_data->responder_resources;
2107 event->param.conn.initiator_depth = rep_data->initiator_depth;
2108 event->param.conn.flow_control = rep_data->flow_control;
2109 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
2110 event->param.conn.srq = rep_data->srq;
2111 event->param.conn.qp_num = rep_data->remote_qpn;
2112
2113 event->ece.vendor_id = rep_data->ece.vendor_id;
2114 event->ece.attr_mod = rep_data->ece.attr_mod;
2115}
2116
2117static int cma_cm_event_handler(struct rdma_id_private *id_priv,
2118 struct rdma_cm_event *event)
2119{
2120 int ret;
2121
2122 lockdep_assert_held(&id_priv->handler_mutex);
2123
2124 trace_cm_event_handler(id_priv, event);
2125 ret = id_priv->id.event_handler(&id_priv->id, event);
2126 trace_cm_event_done(id_priv, event, ret);
2127 return ret;
2128}
2129
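/*
 * IB CM callback for a connecting or connected cm_id: translate IB CM
 * events into RDMA CM events and deliver them under the handler_mutex,
 * destroying the id if the consumer's handler returns non-zero.
 */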
2130static int cma_ib_handler(struct ib_cm_id *cm_id,
2131 const struct ib_cm_event *ib_event)
2132{
2133 struct rdma_id_private *id_priv = cm_id->context;
2134 struct rdma_cm_event event = {};
2135 enum rdma_cm_state state;
2136 int ret;
2137
2138 mutex_lock(&id_priv->handler_mutex);
2139 state = READ_ONCE(id_priv->state);
2140 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
2141 state != RDMA_CM_CONNECT) ||
2142 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
2143 state != RDMA_CM_DISCONNECT))
2144 goto out;
2145
2146 switch (ib_event->event) {
2147 case IB_CM_REQ_ERROR:
2148 case IB_CM_REP_ERROR:
2149 event.event = RDMA_CM_EVENT_UNREACHABLE;
2150 event.status = -ETIMEDOUT;
2151 break;
2152 case IB_CM_REP_RECEIVED:
2153 if (state == RDMA_CM_CONNECT &&
2154 (id_priv->id.qp_type != IB_QPT_UD)) {
2155 trace_cm_send_mra(id_priv);
2156 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2157 }
2158 if (id_priv->id.qp) {
2159 event.status = cma_rep_recv(id_priv);
2160 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
2161 RDMA_CM_EVENT_ESTABLISHED;
2162 } else {
2163 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
2164 }
2165 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
2166 ib_event->private_data);
2167 break;
2168 case IB_CM_RTU_RECEIVED:
2169 case IB_CM_USER_ESTABLISHED:
2170 event.event = RDMA_CM_EVENT_ESTABLISHED;
2171 break;
2172 case IB_CM_DREQ_ERROR:
2173 event.status = -ETIMEDOUT;
2174 fallthrough;
2175 case IB_CM_DREQ_RECEIVED:
2176 case IB_CM_DREP_RECEIVED:
2177 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
2178 RDMA_CM_DISCONNECT))
2179 goto out;
2180 event.event = RDMA_CM_EVENT_DISCONNECTED;
2181 break;
2182 case IB_CM_TIMEWAIT_EXIT:
2183 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
2184 break;
2185 case IB_CM_MRA_RECEIVED:
2186 /* ignore event */
2187 goto out;
2188 case IB_CM_REJ_RECEIVED:
2189 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
2190 ib_event->param.rej_rcvd.reason));
2191 cma_modify_qp_err(id_priv);
2192 event.status = ib_event->param.rej_rcvd.reason;
2193 event.event = RDMA_CM_EVENT_REJECTED;
2194 event.param.conn.private_data = ib_event->private_data;
2195 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
2196 break;
2197 default:
2198 pr_err("RDMA CMA: unexpected IB CM event: %d\n",
2199 ib_event->event);
2200 goto out;
2201 }
2202
2203 ret = cma_cm_event_handler(id_priv, &event);
2204 if (ret) {
2205 /* Destroy the CM ID by returning a non-zero value. */
2206 id_priv->cm_id.ib = NULL;
2207 destroy_id_handler_unlock(id_priv);
2208 return ret;
2209 }
2210out:
2211 mutex_unlock(&id_priv->handler_mutex);
2212 return 0;
2213}
2214
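/*
 * Build a child cm_id for an incoming IB connection request, copying the
 * listener's handler and context, the addresses carried in the request, and
 * the primary (and optional alternate) path records. Returns NULL on error.
 */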
2215static struct rdma_id_private *
2216cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
2217 const struct ib_cm_event *ib_event,
2218 struct net_device *net_dev)
2219{
2220 struct rdma_id_private *listen_id_priv;
2221 struct rdma_id_private *id_priv;
2222 struct rdma_cm_id *id;
2223 struct rdma_route *rt;
2224 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2225 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
2226 const __be64 service_id =
2227 ib_event->param.req_rcvd.primary_path->service_id;
2228 int ret;
2229
2230 listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2231 id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
2232 listen_id->event_handler, listen_id->context,
2233 listen_id->ps,
2234 ib_event->param.req_rcvd.qp_type,
2235 listen_id_priv);
2236 if (IS_ERR(id_priv))
2237 return NULL;
2238
2239 id = &id_priv->id;
2240 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2241 (struct sockaddr *)&id->route.addr.dst_addr,
2242 listen_id, ib_event, ss_family, service_id))
2243 goto err;
2244
2245 rt = &id->route;
2246 rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
2247 rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
2248 sizeof(*rt->path_rec), GFP_KERNEL);
2249 if (!rt->path_rec)
2250 goto err;
2251
2252 rt->path_rec[0] = *path;
2253 if (rt->num_pri_alt_paths == 2)
2254 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
2255
2256 if (net_dev) {
2257 rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
2258 } else {
2259 if (!cma_protocol_roce(listen_id) &&
2260 cma_any_addr(cma_src_addr(id_priv))) {
2261 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
2262 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
2263 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
2264 } else if (!cma_any_addr(cma_src_addr(id_priv))) {
2265 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
2266 if (ret)
2267 goto err;
2268 }
2269 }
2270 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
2271
2272 id_priv->state = RDMA_CM_CONNECT;
2273 return id_priv;
2274
2275err:
2276 rdma_destroy_id(id);
2277 return NULL;
2278}
2279
2280static struct rdma_id_private *
2281cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
2282 const struct ib_cm_event *ib_event,
2283 struct net_device *net_dev)
2284{
2285 const struct rdma_id_private *listen_id_priv;
2286 struct rdma_id_private *id_priv;
2287 struct rdma_cm_id *id;
2288 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2289 struct net *net = listen_id->route.addr.dev_addr.net;
2290 int ret;
2291
2292 listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2293 id_priv = __rdma_create_id(net, listen_id->event_handler,
2294 listen_id->context, listen_id->ps, IB_QPT_UD,
2295 listen_id_priv);
2296 if (IS_ERR(id_priv))
2297 return NULL;
2298
2299 id = &id_priv->id;
2300 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2301 (struct sockaddr *)&id->route.addr.dst_addr,
2302 listen_id, ib_event, ss_family,
2303 ib_event->param.sidr_req_rcvd.service_id))
2304 goto err;
2305
2306 if (net_dev) {
2307 rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
2308 } else {
2309 if (!cma_any_addr(cma_src_addr(id_priv))) {
2310 ret = cma_translate_addr(cma_src_addr(id_priv),
2311 &id->route.addr.dev_addr);
2312 if (ret)
2313 goto err;
2314 }
2315 }
2316
2317 id_priv->state = RDMA_CM_CONNECT;
2318 return id_priv;
2319err:
2320 rdma_destroy_id(id);
2321 return NULL;
2322}
2323
2324static void cma_set_req_event_data(struct rdma_cm_event *event,
2325 const struct ib_cm_req_event_param *req_data,
2326 void *private_data, int offset)
2327{
2328 event->param.conn.private_data = private_data + offset;
2329 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
2330 event->param.conn.responder_resources = req_data->responder_resources;
2331 event->param.conn.initiator_depth = req_data->initiator_depth;
2332 event->param.conn.flow_control = req_data->flow_control;
2333 event->param.conn.retry_count = req_data->retry_count;
2334 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
2335 event->param.conn.srq = req_data->srq;
2336 event->param.conn.qp_num = req_data->remote_qpn;
2337
2338 event->ece.vendor_id = req_data->ece.vendor_id;
2339 event->ece.attr_mod = req_data->ece.attr_mod;
2340}
2341
2342static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
2343 const struct ib_cm_event *ib_event)
2344{
2345 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
2346 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
2347 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
2348 (id->qp_type == IB_QPT_UD)) ||
2349 (!id->qp_type));
2350}
2351
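/*
 * IB CM callback for incoming REQ/SIDR_REQ messages: locate the matching
 * listener, create a child id, bind it to a device and deliver an
 * RDMA_CM_EVENT_CONNECT_REQUEST event to the listener's event handler.
 */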
2352static int cma_ib_req_handler(struct ib_cm_id *cm_id,
2353 const struct ib_cm_event *ib_event)
2354{
2355 struct rdma_id_private *listen_id, *conn_id = NULL;
2356 struct rdma_cm_event event = {};
2357 struct cma_req_info req = {};
2358 struct net_device *net_dev;
2359 u8 offset;
2360 int ret;
2361
2362 listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
2363 if (IS_ERR(listen_id))
2364 return PTR_ERR(listen_id);
2365
2366 trace_cm_req_handler(listen_id, ib_event->event);
2367 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
2368 ret = -EINVAL;
2369 goto net_dev_put;
2370 }
2371
2372 mutex_lock(&listen_id->handler_mutex);
2373 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
2374 ret = -ECONNABORTED;
2375 goto err_unlock;
2376 }
2377
2378 offset = cma_user_data_offset(listen_id);
2379 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2380 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
2381 conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
2382 event.param.ud.private_data = ib_event->private_data + offset;
2383 event.param.ud.private_data_len =
2384 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
2385 } else {
2386 conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
2387 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
2388 ib_event->private_data, offset);
2389 }
2390 if (!conn_id) {
2391 ret = -ENOMEM;
2392 goto err_unlock;
2393 }
2394
2395 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2396 ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
2397 if (ret) {
2398 destroy_id_handler_unlock(conn_id);
2399 goto err_unlock;
2400 }
2401
2402 conn_id->cm_id.ib = cm_id;
2403 cm_id->context = conn_id;
2404 cm_id->cm_handler = cma_ib_handler;
2405
2406 ret = cma_cm_event_handler(conn_id, &event);
2407 if (ret) {
2408 /* Destroy the CM ID by returning a non-zero value. */
2409 conn_id->cm_id.ib = NULL;
2410 mutex_unlock(&listen_id->handler_mutex);
2411 destroy_id_handler_unlock(conn_id);
2412 goto net_dev_put;
2413 }
2414
2415 if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
2416 conn_id->id.qp_type != IB_QPT_UD) {
2417 trace_cm_send_mra(cm_id->context);
2418 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2419 }
2420 mutex_unlock(&conn_id->handler_mutex);
2421
2422err_unlock:
2423 mutex_unlock(&listen_id->handler_mutex);
2424
2425net_dev_put:
2426 if (net_dev)
2427 dev_put(net_dev);
2428
2429 return ret;
2430}
2431
2432__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
2433{
2434 if (addr->sa_family == AF_IB)
2435 return ((struct sockaddr_ib *) addr)->sib_sid;
2436
2437 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
2438}
2439EXPORT_SYMBOL(rdma_get_service_id);
2440
2441void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
2442 union ib_gid *dgid)
2443{
2444 struct rdma_addr *addr = &cm_id->route.addr;
2445
2446 if (!cm_id->device) {
2447 if (sgid)
2448 memset(sgid, 0, sizeof(*sgid));
2449 if (dgid)
2450 memset(dgid, 0, sizeof(*dgid));
2451 return;
2452 }
2453
2454 if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
2455 if (sgid)
2456 rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
2457 if (dgid)
2458 rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
2459 } else {
2460 if (sgid)
2461 rdma_addr_get_sgid(&addr->dev_addr, sgid);
2462 if (dgid)
2463 rdma_addr_get_dgid(&addr->dev_addr, dgid);
2464 }
2465}
2466EXPORT_SYMBOL(rdma_read_gids);
2467
2468static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2469{
2470 struct rdma_id_private *id_priv = iw_id->context;
2471 struct rdma_cm_event event = {};
2472 int ret = 0;
2473 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2474 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2475
2476 mutex_lock(&id_priv->handler_mutex);
2477 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
2478 goto out;
2479
2480 switch (iw_event->event) {
2481 case IW_CM_EVENT_CLOSE:
2482 event.event = RDMA_CM_EVENT_DISCONNECTED;
2483 break;
2484 case IW_CM_EVENT_CONNECT_REPLY:
2485 memcpy(cma_src_addr(id_priv), laddr,
2486 rdma_addr_size(laddr));
2487 memcpy(cma_dst_addr(id_priv), raddr,
2488 rdma_addr_size(raddr));
2489 switch (iw_event->status) {
2490 case 0:
2491 event.event = RDMA_CM_EVENT_ESTABLISHED;
2492 event.param.conn.initiator_depth = iw_event->ird;
2493 event.param.conn.responder_resources = iw_event->ord;
2494 break;
2495 case -ECONNRESET:
2496 case -ECONNREFUSED:
2497 event.event = RDMA_CM_EVENT_REJECTED;
2498 break;
2499 case -ETIMEDOUT:
2500 event.event = RDMA_CM_EVENT_UNREACHABLE;
2501 break;
2502 default:
2503 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
2504 break;
2505 }
2506 break;
2507 case IW_CM_EVENT_ESTABLISHED:
2508 event.event = RDMA_CM_EVENT_ESTABLISHED;
2509 event.param.conn.initiator_depth = iw_event->ird;
2510 event.param.conn.responder_resources = iw_event->ord;
2511 break;
2512 default:
2513 goto out;
2514 }
2515
2516 event.status = iw_event->status;
2517 event.param.conn.private_data = iw_event->private_data;
2518 event.param.conn.private_data_len = iw_event->private_data_len;
2519 ret = cma_cm_event_handler(id_priv, &event);
2520 if (ret) {
2521 /* Destroy the CM ID by returning a non-zero value. */
2522 id_priv->cm_id.iw = NULL;
2523 destroy_id_handler_unlock(id_priv);
2524 return ret;
2525 }
2526
2527out:
2528 mutex_unlock(&id_priv->handler_mutex);
2529 return ret;
2530}
2531
2532static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2533 struct iw_cm_event *iw_event)
2534{
2535 struct rdma_id_private *listen_id, *conn_id;
2536 struct rdma_cm_event event = {};
2537 int ret = -ECONNABORTED;
2538 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2539 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2540
2541 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2542 event.param.conn.private_data = iw_event->private_data;
2543 event.param.conn.private_data_len = iw_event->private_data_len;
2544 event.param.conn.initiator_depth = iw_event->ird;
2545 event.param.conn.responder_resources = iw_event->ord;
2546
2547 listen_id = cm_id->context;
2548
2549 mutex_lock(&listen_id->handler_mutex);
2550 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
2551 goto out;
2552
2553 /* Create a new RDMA id for the new IW CM ID */
2554 conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2555 listen_id->id.event_handler,
2556 listen_id->id.context, RDMA_PS_TCP,
2557 IB_QPT_RC, listen_id);
2558 if (IS_ERR(conn_id)) {
2559 ret = -ENOMEM;
2560 goto out;
2561 }
2562 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2563 conn_id->state = RDMA_CM_CONNECT;
2564
2565 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2566 if (ret) {
2567 mutex_unlock(&listen_id->handler_mutex);
2568 destroy_id_handler_unlock(conn_id);
2569 return ret;
2570 }
2571
2572 ret = cma_iw_acquire_dev(conn_id, listen_id);
2573 if (ret) {
2574 mutex_unlock(&listen_id->handler_mutex);
2575 destroy_id_handler_unlock(conn_id);
2576 return ret;
2577 }
2578
2579 conn_id->cm_id.iw = cm_id;
2580 cm_id->context = conn_id;
2581 cm_id->cm_handler = cma_iw_handler;
2582
2583 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
2584 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
2585
2586 ret = cma_cm_event_handler(conn_id, &event);
2587 if (ret) {
2588 /* User wants to destroy the CM ID */
2589 conn_id->cm_id.iw = NULL;
2590 mutex_unlock(&listen_id->handler_mutex);
2591 destroy_id_handler_unlock(conn_id);
2592 return ret;
2593 }
2594
2595 mutex_unlock(&conn_id->handler_mutex);
2596
2597out:
2598 mutex_unlock(&listen_id->handler_mutex);
2599 return ret;
2600}
2601
2602static int cma_ib_listen(struct rdma_id_private *id_priv)
2603{
2604 struct sockaddr *addr;
2605 struct ib_cm_id *id;
2606 __be64 svc_id;
2607
2608 addr = cma_src_addr(id_priv);
2609 svc_id = rdma_get_service_id(&id_priv->id, addr);
2610 id = ib_cm_insert_listen(id_priv->id.device,
2611 cma_ib_req_handler, svc_id);
2612 if (IS_ERR(id))
2613 return PTR_ERR(id);
2614 id_priv->cm_id.ib = id;
2615
2616 return 0;
2617}
2618
2619static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
2620{
2621 int ret;
2622 struct iw_cm_id *id;
2623
2624 id = iw_create_cm_id(id_priv->id.device,
2625 iw_conn_req_handler,
2626 id_priv);
2627 if (IS_ERR(id))
2628 return PTR_ERR(id);
2629
2630 mutex_lock(&id_priv->qp_mutex);
2631 id->tos = id_priv->tos;
2632 id->tos_set = id_priv->tos_set;
2633 mutex_unlock(&id_priv->qp_mutex);
2634 id->afonly = id_priv->afonly;
2635 id_priv->cm_id.iw = id;
2636
2637 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
2638 rdma_addr_size(cma_src_addr(id_priv)));
2639
2640 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
2641
2642 if (ret) {
2643 iw_destroy_cm_id(id_priv->cm_id.iw);
2644 id_priv->cm_id.iw = NULL;
2645 }
2646
2647 return ret;
2648}
2649
2650static int cma_listen_handler(struct rdma_cm_id *id,
2651 struct rdma_cm_event *event)
2652{
2653 struct rdma_id_private *id_priv = id->context;
2654
2655 /* Listening IDs are always destroyed on removal */
2656 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
2657 return -1;
2658
2659 id->context = id_priv->id.context;
2660 id->event_handler = id_priv->id.event_handler;
2661 trace_cm_event_handler(id_priv, event);
2662 return id_priv->id.event_handler(id, event);
2663}
2664
2665static int cma_listen_on_dev(struct rdma_id_private *id_priv,
2666 struct cma_device *cma_dev,
2667 struct rdma_id_private **to_destroy)
2668{
2669 struct rdma_id_private *dev_id_priv;
2670 struct net *net = id_priv->id.route.addr.dev_addr.net;
2671 int ret;
2672
2673 lockdep_assert_held(&lock);
2674
2675 *to_destroy = NULL;
2676 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2677 return 0;
2678
2679 dev_id_priv =
2680 __rdma_create_id(net, cma_listen_handler, id_priv,
2681 id_priv->id.ps, id_priv->id.qp_type, id_priv);
2682 if (IS_ERR(dev_id_priv))
2683 return PTR_ERR(dev_id_priv);
2684
2685 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2686 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2687 rdma_addr_size(cma_src_addr(id_priv)));
2688
2689 _cma_attach_to_dev(dev_id_priv, cma_dev);
2690 rdma_restrack_add(&dev_id_priv->res);
2691 cma_id_get(id_priv);
2692 dev_id_priv->internal_id = 1;
2693 dev_id_priv->afonly = id_priv->afonly;
2694 mutex_lock(&id_priv->qp_mutex);
2695 dev_id_priv->tos_set = id_priv->tos_set;
2696 dev_id_priv->tos = id_priv->tos;
2697 mutex_unlock(&id_priv->qp_mutex);
2698
2699 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
2700 if (ret)
2701 goto err_listen;
2702 list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
2703 return 0;
2704err_listen:
2705 /* Caller must destroy this after releasing lock */
2706 *to_destroy = dev_id_priv;
2707 dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
2708 return ret;
2709}
2710
2711static int cma_listen_on_all(struct rdma_id_private *id_priv)
2712{
2713 struct rdma_id_private *to_destroy;
2714 struct cma_device *cma_dev;
2715 int ret;
2716
2717 mutex_lock(&lock);
2718 list_add_tail(&id_priv->listen_any_item, &listen_any_list);
2719 list_for_each_entry(cma_dev, &dev_list, list) {
2720 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
2721 if (ret) {
2722 /* Prevent racing with cma_process_remove() */
2723 if (to_destroy)
2724 list_del_init(&to_destroy->device_item);
2725 goto err_listen;
2726 }
2727 }
2728 mutex_unlock(&lock);
2729 return 0;
2730
2731err_listen:
2732 _cma_cancel_listens(id_priv);
2733 mutex_unlock(&lock);
2734 if (to_destroy)
2735 rdma_destroy_id(&to_destroy->id);
2736 return ret;
2737}
2738
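/*
 * rdma_set_service_type() - Set the type of service for a connection.
 * @id: Communication identifier.
 * @tos: Type of service value, analogous to the IP ToS field.
 *
 * The value is stored on the rdma_cm_id and consulted later during route
 * resolution (see cma_query_ib_route() and cma_resolve_iboe_route() below),
 * so it should be set before calling rdma_resolve_route().
 */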
2739void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2740{
2741 struct rdma_id_private *id_priv;
2742
2743 id_priv = container_of(id, struct rdma_id_private, id);
2744 mutex_lock(&id_priv->qp_mutex);
2745 id_priv->tos = (u8) tos;
2746 id_priv->tos_set = true;
2747 mutex_unlock(&id_priv->qp_mutex);
2748}
2749EXPORT_SYMBOL(rdma_set_service_type);
2750
2751/**
2752 * rdma_set_ack_timeout() - Set the ack timeout of the QP associated
2753 * with a connection identifier.
2754 * @id: Communication identifier associated with the QP.
2755 * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
2756 *
2757 * This function should be called before rdma_connect() on the active side,
2758 * and before rdma_accept() on the passive side. It applies to the primary
2759 * path only. The timeout affects the local side of the QP only; it is not
2760 * negotiated with the remote side, and a value of zero disables the timer.
2761 * If it is set before rdma_resolve_route(), the value is also used to
2762 * determine the PacketLifeTime for RoCE.
2763 *
2764 * Return: 0 for success
2765 */
2766int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
2767{
2768 struct rdma_id_private *id_priv;
2769
2770 if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
2771 return -EINVAL;
2772
2773 id_priv = container_of(id, struct rdma_id_private, id);
2774 mutex_lock(&id_priv->qp_mutex);
2775 id_priv->timeout = timeout;
2776 id_priv->timeout_set = true;
2777 mutex_unlock(&id_priv->qp_mutex);
2778
2779 return 0;
2780}
2781EXPORT_SYMBOL(rdma_set_ack_timeout);
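
/*
 * Usage sketch (illustrative only; "id" is a hypothetical ULP-owned
 * rdma_cm_id): the encoding is logarithmic, so e.g.
 *
 *	rdma_set_ack_timeout(id, 14);	// 4.096 usec * 2^14 ~= 67 msec
 *
 * while a timeout of 0 disables the local ACK timer. Call it before
 * rdma_connect() on the active side or rdma_accept() on the passive side.
 */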
2782
2783/**
2784 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
2785 * QP associated with a connection identifier.
2786 * @id: Communication identifier associated with the QP.
2787 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
2788 * Timer Field" in the IBTA specification.
2789 *
2790 * This function should be called before rdma_connect() on active
2791 * side, and on passive side before rdma_accept(). The timer value
2792 * will be associated with the local QP. When the QP receives a send it
2793 * is not ready to handle, typically because the receive queue is empty,
2794 * an RNR NAK is returned to the requester with the min_rnr_timer
2795 * encoded. The requester will then wait at least the time specified
2796 * in the NAK before retrying. The default is zero, which translates
2797 * to a minimum RNR Timer value of 655 ms.
2798 *
2799 * Return: 0 for success
2800 */
2801int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
2802{
2803 struct rdma_id_private *id_priv;
2804
2805 /* It is a five-bit value */
2806 if (min_rnr_timer & 0xe0)
2807 return -EINVAL;
2808
2809 if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
2810 return -EINVAL;
2811
2812 id_priv = container_of(id, struct rdma_id_private, id);
2813 mutex_lock(&id_priv->qp_mutex);
2814 id_priv->min_rnr_timer = min_rnr_timer;
2815 id_priv->min_rnr_timer_set = true;
2816 mutex_unlock(&id_priv->qp_mutex);
2817
2818 return 0;
2819}
2820EXPORT_SYMBOL(rdma_set_min_rnr_timer);
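
/*
 * Usage sketch (illustrative only; "id" is a hypothetical ULP-owned
 * rdma_cm_id): the argument is the 5-bit IBTA RNR NAK timer encoding, not a
 * time in milliseconds. For example,
 *
 *	rdma_set_min_rnr_timer(id, 0);	// encoding 0 = 655.36 ms (the default)
 *
 * Values with any bit above the low five set are rejected with -EINVAL.
 */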
2821
2822static void route_set_path_rec_inbound(struct cma_work *work,
2823 struct sa_path_rec *path_rec)
2824{
2825 struct rdma_route *route = &work->id->id.route;
2826
2827 if (!route->path_rec_inbound) {
2828 route->path_rec_inbound =
2829 kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
2830 if (!route->path_rec_inbound)
2831 return;
2832 }
2833
2834 *route->path_rec_inbound = *path_rec;
2835}
2836
2837static void route_set_path_rec_outbound(struct cma_work *work,
2838 struct sa_path_rec *path_rec)
2839{
2840 struct rdma_route *route = &work->id->id.route;
2841
2842 if (!route->path_rec_outbound) {
2843 route->path_rec_outbound =
2844 kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
2845 if (!route->path_rec_outbound)
2846 return;
2847 }
2848
2849 *route->path_rec_outbound = *path_rec;
2850}
2851
2852static void cma_query_handler(int status, struct sa_path_rec *path_rec,
2853 int num_prs, void *context)
2854{
2855 struct cma_work *work = context;
2856 struct rdma_route *route;
2857 int i;
2858
2859 route = &work->id->id.route;
2860
2861 if (status)
2862 goto fail;
2863
2864 for (i = 0; i < num_prs; i++) {
2865 if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
2866 *route->path_rec = path_rec[i];
2867 else if (path_rec[i].flags & IB_PATH_INBOUND)
2868 route_set_path_rec_inbound(work, &path_rec[i]);
2869 else if (path_rec[i].flags & IB_PATH_OUTBOUND)
2870 route_set_path_rec_outbound(work, &path_rec[i]);
2871 }
2872 if (!route->path_rec) {
2873 status = -EINVAL;
2874 goto fail;
2875 }
2876
2877 route->num_pri_alt_paths = 1;
2878 queue_work(cma_wq, &work->work);
2879 return;
2880
2881fail:
2882 work->old_state = RDMA_CM_ROUTE_QUERY;
2883 work->new_state = RDMA_CM_ADDR_RESOLVED;
2884 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2885 work->event.status = status;
2886 pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
2887 status);
2888 queue_work(cma_wq, &work->work);
2889}
2890
2891static int cma_query_ib_route(struct rdma_id_private *id_priv,
2892 unsigned long timeout_ms, struct cma_work *work)
2893{
2894 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2895 struct sa_path_rec path_rec;
2896 ib_sa_comp_mask comp_mask;
2897 struct sockaddr_in6 *sin6;
2898 struct sockaddr_ib *sib;
2899
2900 memset(&path_rec, 0, sizeof path_rec);
2901
2902 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
2903 path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
2904 else
2905 path_rec.rec_type = SA_PATH_REC_TYPE_IB;
2906 rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2907 rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2908 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2909 path_rec.numb_path = 1;
2910 path_rec.reversible = 1;
2911 path_rec.service_id = rdma_get_service_id(&id_priv->id,
2912 cma_dst_addr(id_priv));
2913
2914 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2915 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2916 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2917
2918 switch (cma_family(id_priv)) {
2919 case AF_INET:
2920 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2921 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2922 break;
2923 case AF_INET6:
2924 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2925 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2926 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2927 break;
2928 case AF_IB:
2929 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2930 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2931 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2932 break;
2933 }
2934
2935 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2936 id_priv->id.port_num, &path_rec,
2937 comp_mask, timeout_ms,
2938 GFP_KERNEL, cma_query_handler,
2939 work, &id_priv->query);
2940
2941 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2942}
2943
2944static void cma_iboe_join_work_handler(struct work_struct *work)
2945{
2946 struct cma_multicast *mc =
2947 container_of(work, struct cma_multicast, iboe_join.work);
2948 struct rdma_cm_event *event = &mc->iboe_join.event;
2949 struct rdma_id_private *id_priv = mc->id_priv;
2950 int ret;
2951
2952 mutex_lock(&id_priv->handler_mutex);
2953 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
2954 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
2955 goto out_unlock;
2956
2957 ret = cma_cm_event_handler(id_priv, event);
2958 WARN_ON(ret);
2959
2960out_unlock:
2961 mutex_unlock(&id_priv->handler_mutex);
2962 if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
2963 rdma_destroy_ah_attr(&event->param.ud.ah_attr);
2964}
2965
2966static void cma_work_handler(struct work_struct *_work)
2967{
2968 struct cma_work *work = container_of(_work, struct cma_work, work);
2969 struct rdma_id_private *id_priv = work->id;
2970
2971 mutex_lock(&id_priv->handler_mutex);
2972 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
2973 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
2974 goto out_unlock;
2975 if (work->old_state != 0 || work->new_state != 0) {
2976 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
2977 goto out_unlock;
2978 }
2979
2980 if (cma_cm_event_handler(id_priv, &work->event)) {
2981 cma_id_put(id_priv);
2982 destroy_id_handler_unlock(id_priv);
2983 goto out_free;
2984 }
2985
2986out_unlock:
2987 mutex_unlock(&id_priv->handler_mutex);
2988 cma_id_put(id_priv);
2989out_free:
2990 if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
2991 rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
2992 kfree(work);
2993}
2994
2995static void cma_init_resolve_route_work(struct cma_work *work,
2996 struct rdma_id_private *id_priv)
2997{
2998 work->id = id_priv;
2999 INIT_WORK(&work->work, cma_work_handler);
3000 work->old_state = RDMA_CM_ROUTE_QUERY;
3001 work->new_state = RDMA_CM_ROUTE_RESOLVED;
3002 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
3003}
3004
3005static void enqueue_resolve_addr_work(struct cma_work *work,
3006 struct rdma_id_private *id_priv)
3007{
3008 /* Balances with cma_id_put() in cma_work_handler */
3009 cma_id_get(id_priv);
3010
3011 work->id = id_priv;
3012 INIT_WORK(&work->work, cma_work_handler);
3013 work->old_state = RDMA_CM_ADDR_QUERY;
3014 work->new_state = RDMA_CM_ADDR_RESOLVED;
3015 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3016
3017 queue_work(cma_wq, &work->work);
3018}
3019
3020static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
3021 unsigned long timeout_ms)
3022{
3023 struct rdma_route *route = &id_priv->id.route;
3024 struct cma_work *work;
3025 int ret;
3026
3027 work = kzalloc(sizeof *work, GFP_KERNEL);
3028 if (!work)
3029 return -ENOMEM;
3030
3031 cma_init_resolve_route_work(work, id_priv);
3032
3033 if (!route->path_rec)
3034 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
3035 if (!route->path_rec) {
3036 ret = -ENOMEM;
3037 goto err1;
3038 }
3039
3040 ret = cma_query_ib_route(id_priv, timeout_ms, work);
3041 if (ret)
3042 goto err2;
3043
3044 return 0;
3045err2:
3046 kfree(route->path_rec);
3047 route->path_rec = NULL;
3048err1:
3049 kfree(work);
3050 return ret;
3051}
3052
3053static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
3054 unsigned long supported_gids,
3055 enum ib_gid_type default_gid)
3056{
3057 if ((network_type == RDMA_NETWORK_IPV4 ||
3058 network_type == RDMA_NETWORK_IPV6) &&
3059 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
3060 return IB_GID_TYPE_ROCE_UDP_ENCAP;
3061
3062 return default_gid;
3063}
3064
3065/*
3066 * cma_iboe_set_path_rec_l2_fields() is a helper that sets the
3067 * path record type based on the GID type.
3068 * It also fills in the path record's other L2 fields, such as the
3069 * destination MAC address and the netdev ifindex.
3070 * It returns the netdev of the bound interface for this path record entry.
3071 */
3072static struct net_device *
3073cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
3074{
3075 struct rdma_route *route = &id_priv->id.route;
3076 enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
3077 struct rdma_addr *addr = &route->addr;
3078 unsigned long supported_gids;
3079 struct net_device *ndev;
3080
3081 if (!addr->dev_addr.bound_dev_if)
3082 return NULL;
3083
3084 ndev = dev_get_by_index(addr->dev_addr.net,
3085 addr->dev_addr.bound_dev_if);
3086 if (!ndev)
3087 return NULL;
3088
3089 supported_gids = roce_gid_type_mask_support(id_priv->id.device,
3090 id_priv->id.port_num);
3091 gid_type = cma_route_gid_type(addr->dev_addr.network,
3092 supported_gids,
3093 id_priv->gid_type);
3094	/* Use the hint from the IP stack to select the GID type */
3095 if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
3096 gid_type = ib_network_to_gid_type(addr->dev_addr.network);
3097 route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
3098
3099 route->path_rec->roce.route_resolved = true;
3100 sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
3101 return ndev;
3102}
3103
3104int rdma_set_ib_path(struct rdma_cm_id *id,
3105 struct sa_path_rec *path_rec)
3106{
3107 struct rdma_id_private *id_priv;
3108 struct net_device *ndev;
3109 int ret;
3110
3111 id_priv = container_of(id, struct rdma_id_private, id);
3112 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3113 RDMA_CM_ROUTE_RESOLVED))
3114 return -EINVAL;
3115
3116 id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
3117 GFP_KERNEL);
3118 if (!id->route.path_rec) {
3119 ret = -ENOMEM;
3120 goto err;
3121 }
3122
3123 if (rdma_protocol_roce(id->device, id->port_num)) {
3124 ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3125 if (!ndev) {
3126 ret = -ENODEV;
3127 goto err_free;
3128 }
3129 dev_put(ndev);
3130 }
3131
3132 id->route.num_pri_alt_paths = 1;
3133 return 0;
3134
3135err_free:
3136 kfree(id->route.path_rec);
3137 id->route.path_rec = NULL;
3138err:
3139 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
3140 return ret;
3141}
3142EXPORT_SYMBOL(rdma_set_ib_path);
3143
3144static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
3145{
3146 struct cma_work *work;
3147
3148 work = kzalloc(sizeof *work, GFP_KERNEL);
3149 if (!work)
3150 return -ENOMEM;
3151
3152 cma_init_resolve_route_work(work, id_priv);
3153 queue_work(cma_wq, &work->work);
3154 return 0;
3155}
3156
3157static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
3158{
3159 struct net_device *dev;
3160
3161 dev = vlan_dev_real_dev(vlan_ndev);
3162 if (dev->num_tc)
3163 return netdev_get_prio_tc_map(dev, prio);
3164
3165 return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
3166 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3167}
3168
3169struct iboe_prio_tc_map {
3170 int input_prio;
3171 int output_tc;
3172 bool found;
3173};
3174
3175static int get_lower_vlan_dev_tc(struct net_device *dev,
3176 struct netdev_nested_priv *priv)
3177{
3178 struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
3179
3180 if (is_vlan_dev(dev))
3181 map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
3182 else if (dev->num_tc)
3183 map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
3184 else
3185 map->output_tc = 0;
3186	/* We are interested only in the first level VLAN device, so always
3187 * return 1 to stop iterating over next level devices.
3188 */
3189 map->found = true;
3190 return 1;
3191}
3192
3193static int iboe_tos_to_sl(struct net_device *ndev, int tos)
3194{
3195 struct iboe_prio_tc_map prio_tc_map = {};
3196 int prio = rt_tos2priority(tos);
3197 struct netdev_nested_priv priv;
3198
3199 /* If VLAN device, get it directly from the VLAN netdev */
3200 if (is_vlan_dev(ndev))
3201 return get_vlan_ndev_tc(ndev, prio);
3202
3203 prio_tc_map.input_prio = prio;
3204 priv.data = (void *)&prio_tc_map;
3205 rcu_read_lock();
3206 netdev_walk_all_lower_dev_rcu(ndev,
3207 get_lower_vlan_dev_tc,
3208 &priv);
3209 rcu_read_unlock();
3210	/* If a map is found from a lower device, use it; otherwise
3211	 * continue with the current netdevice to get the priority-to-TC map.
3212 */
3213 if (prio_tc_map.found)
3214 return prio_tc_map.output_tc;
3215 else if (ndev->num_tc)
3216 return netdev_get_prio_tc_map(ndev, prio);
3217 else
3218 return 0;
3219}
3220
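/*
 * Derive a GRH flow label for a RoCEv2 (UDP-encapsulated) connection:
 * reuse the IPv6 flow label from the bound source address when one is set,
 * otherwise fold the source and destination ports into a simple hash so
 * that different connections tend to get distinct labels (useful, e.g.,
 * for ECMP-style load spreading).
 */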
3221static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
3222{
3223 struct sockaddr_in6 *addr6;
3224 u16 dport, sport;
3225 u32 hash, fl;
3226
3227 addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
3228 fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
3229 if ((cma_family(id_priv) != AF_INET6) || !fl) {
3230 dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
3231 sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
3232 hash = (u32)sport * 31 + dport;
3233 fl = hash & IB_GRH_FLOWLABEL_MASK;
3234 }
3235
3236 return cpu_to_be32(fl);
3237}
3238
3239static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
3240{
3241 struct rdma_route *route = &id_priv->id.route;
3242 struct rdma_addr *addr = &route->addr;
3243 struct cma_work *work;
3244 int ret;
3245 struct net_device *ndev;
3246
3247 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
3248 rdma_start_port(id_priv->cma_dev->device)];
3249 u8 tos;
3250
3251 mutex_lock(&id_priv->qp_mutex);
3252 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
3253 mutex_unlock(&id_priv->qp_mutex);
3254
3255 work = kzalloc(sizeof *work, GFP_KERNEL);
3256 if (!work)
3257 return -ENOMEM;
3258
3259 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
3260 if (!route->path_rec) {
3261 ret = -ENOMEM;
3262 goto err1;
3263 }
3264
3265 route->num_pri_alt_paths = 1;
3266
3267 ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3268 if (!ndev) {
3269 ret = -ENODEV;
3270 goto err2;
3271 }
3272
3273 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3274 &route->path_rec->sgid);
3275 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
3276 &route->path_rec->dgid);
3277
3278 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
3279 /* TODO: get the hoplimit from the inet/inet6 device */
3280 route->path_rec->hop_limit = addr->dev_addr.hoplimit;
3281 else
3282 route->path_rec->hop_limit = 1;
3283 route->path_rec->reversible = 1;
3284 route->path_rec->pkey = cpu_to_be16(0xffff);
3285 route->path_rec->mtu_selector = IB_SA_EQ;
3286 route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
3287 route->path_rec->traffic_class = tos;
3288 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
3289 route->path_rec->rate_selector = IB_SA_EQ;
3290 route->path_rec->rate = iboe_get_rate(ndev);
3291 dev_put(ndev);
3292 route->path_rec->packet_life_time_selector = IB_SA_EQ;
3293	/* If the ACK timeout is set, use it to calculate
3294 * PacketLifeTime. As per IBTA 12.7.34,
3295 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
3296 * Assuming a negligible local ACK delay, we can use
3297 * PacketLifeTime = local ACK timeout/2
3298 * as a reasonable approximation for RoCE networks.
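	 * Note that both fields use the same 4.096 usec * 2^x encoding, so
	 * halving the time amounts to subtracting one from the exponent,
	 * hence the (timeout - 1) below.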
3299 */
3300 mutex_lock(&id_priv->qp_mutex);
3301 if (id_priv->timeout_set && id_priv->timeout)
3302 route->path_rec->packet_life_time = id_priv->timeout - 1;
3303 else
3304 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
3305 mutex_unlock(&id_priv->qp_mutex);
3306
3307 if (!route->path_rec->mtu) {
3308 ret = -EINVAL;
3309 goto err2;
3310 }
3311
3312 if (rdma_protocol_roce_udp_encap(id_priv->id.device,
3313 id_priv->id.port_num))
3314 route->path_rec->flow_label =
3315 cma_get_roce_udp_flow_label(id_priv);
3316
3317 cma_init_resolve_route_work(work, id_priv);
3318 queue_work(cma_wq, &work->work);
3319
3320 return 0;
3321
3322err2:
3323 kfree(route->path_rec);
3324 route->path_rec = NULL;
3325 route->num_pri_alt_paths = 0;
3326err1:
3327 kfree(work);
3328 return ret;
3329}
3330
3331int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
3332{
3333 struct rdma_id_private *id_priv;
3334 int ret;
3335
3336 if (!timeout_ms)
3337 return -EINVAL;
3338
3339 id_priv = container_of(id, struct rdma_id_private, id);
3340 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
3341 return -EINVAL;
3342
3343 cma_id_get(id_priv);
3344 if (rdma_cap_ib_sa(id->device, id->port_num))
3345 ret = cma_resolve_ib_route(id_priv, timeout_ms);
3346 else if (rdma_protocol_roce(id->device, id->port_num)) {
3347 ret = cma_resolve_iboe_route(id_priv);
3348 if (!ret)
3349 cma_add_id_to_tree(id_priv);
3350 }
3351 else if (rdma_protocol_iwarp(id->device, id->port_num))
3352 ret = cma_resolve_iw_route(id_priv);
3353 else
3354 ret = -ENOSYS;
3355
3356 if (ret)
3357 goto err;
3358
3359 return 0;
3360err:
3361 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
3362 cma_id_put(id_priv);
3363 return ret;
3364}
3365EXPORT_SYMBOL(rdma_resolve_route);
3366
3367static void cma_set_loopback(struct sockaddr *addr)
3368{
3369 switch (addr->sa_family) {
3370 case AF_INET:
3371 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
3372 break;
3373 case AF_INET6:
3374 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
3375 0, 0, 0, htonl(1));
3376 break;
3377 default:
3378 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
3379 0, 0, 0, htonl(1));
3380 break;
3381 }
3382}
3383
3384static int cma_bind_loopback(struct rdma_id_private *id_priv)
3385{
3386 struct cma_device *cma_dev, *cur_dev;
3387 union ib_gid gid;
3388 enum ib_port_state port_state;
3389 unsigned int p;
3390 u16 pkey;
3391 int ret;
3392
3393 cma_dev = NULL;
3394 mutex_lock(&lock);
3395 list_for_each_entry(cur_dev, &dev_list, list) {
3396 if (cma_family(id_priv) == AF_IB &&
3397 !rdma_cap_ib_cm(cur_dev->device, 1))
3398 continue;
3399
3400 if (!cma_dev)
3401 cma_dev = cur_dev;
3402
3403 rdma_for_each_port (cur_dev->device, p) {
3404 if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
3405 port_state == IB_PORT_ACTIVE) {
3406 cma_dev = cur_dev;
3407 goto port_found;
3408 }
3409 }
3410 }
3411
3412 if (!cma_dev) {
3413 ret = -ENODEV;
3414 goto out;
3415 }
3416
3417 p = 1;
3418
3419port_found:
3420 ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
3421 if (ret)
3422 goto out;
3423
3424 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
3425 if (ret)
3426 goto out;
3427
3428 id_priv->id.route.addr.dev_addr.dev_type =
3429 (rdma_protocol_ib(cma_dev->device, p)) ?
3430 ARPHRD_INFINIBAND : ARPHRD_ETHER;
3431
3432 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3433 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
3434 id_priv->id.port_num = p;
3435 cma_attach_to_dev(id_priv, cma_dev);
3436 rdma_restrack_add(&id_priv->res);
3437 cma_set_loopback(cma_src_addr(id_priv));
3438out:
3439 mutex_unlock(&lock);
3440 return ret;
3441}
3442
3443static void addr_handler(int status, struct sockaddr *src_addr,
3444 struct rdma_dev_addr *dev_addr, void *context)
3445{
3446 struct rdma_id_private *id_priv = context;
3447 struct rdma_cm_event event = {};
3448 struct sockaddr *addr;
3449 struct sockaddr_storage old_addr;
3450
3451 mutex_lock(&id_priv->handler_mutex);
3452 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
3453 RDMA_CM_ADDR_RESOLVED))
3454 goto out;
3455
3456 /*
3457 * Store the previous src address, so that if we fail to acquire
3458 * matching rdma device, old address can be restored back, which helps
3459 * to cancel the cma listen operation correctly.
3460 */
3461 addr = cma_src_addr(id_priv);
3462 memcpy(&old_addr, addr, rdma_addr_size(addr));
3463 memcpy(addr, src_addr, rdma_addr_size(src_addr));
3464 if (!status && !id_priv->cma_dev) {
3465 status = cma_acquire_dev_by_src_ip(id_priv);
3466 if (status)
3467 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
3468 status);
3469 rdma_restrack_add(&id_priv->res);
3470 } else if (status) {
3471 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
3472 }
3473
3474 if (status) {
3475 memcpy(addr, &old_addr,
3476 rdma_addr_size((struct sockaddr *)&old_addr));
3477 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3478 RDMA_CM_ADDR_BOUND))
3479 goto out;
3480 event.event = RDMA_CM_EVENT_ADDR_ERROR;
3481 event.status = status;
3482 } else
3483 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3484
3485 if (cma_cm_event_handler(id_priv, &event)) {
3486 destroy_id_handler_unlock(id_priv);
3487 return;
3488 }
3489out:
3490 mutex_unlock(&id_priv->handler_mutex);
3491}
3492
3493static int cma_resolve_loopback(struct rdma_id_private *id_priv)
3494{
3495 struct cma_work *work;
3496 union ib_gid gid;
3497 int ret;
3498
3499 work = kzalloc(sizeof *work, GFP_KERNEL);
3500 if (!work)
3501 return -ENOMEM;
3502
3503 if (!id_priv->cma_dev) {
3504 ret = cma_bind_loopback(id_priv);
3505 if (ret)
3506 goto err;
3507 }
3508
3509 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3510 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
3511
3512 enqueue_resolve_addr_work(work, id_priv);
3513 return 0;
3514err:
3515 kfree(work);
3516 return ret;
3517}
3518
3519static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
3520{
3521 struct cma_work *work;
3522 int ret;
3523
3524 work = kzalloc(sizeof *work, GFP_KERNEL);
3525 if (!work)
3526 return -ENOMEM;
3527
3528 if (!id_priv->cma_dev) {
3529 ret = cma_resolve_ib_dev(id_priv);
3530 if (ret)
3531 goto err;
3532 }
3533
3534 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
3535 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
3536
3537 enqueue_resolve_addr_work(work, id_priv);
3538 return 0;
3539err:
3540 kfree(work);
3541 return ret;
3542}
3543
3544static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
3545 const struct sockaddr *dst_addr)
3546{
3547 struct sockaddr_storage zero_sock = {};
3548
3549 if (src_addr && src_addr->sa_family)
3550 return rdma_bind_addr(id, src_addr);
3551
3552 /*
3553	 * When src_addr is not specified, automatically supply a wildcard address
3554 */
3555 zero_sock.ss_family = dst_addr->sa_family;
3556 if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
3557 struct sockaddr_in6 *src_addr6 =
3558 (struct sockaddr_in6 *)&zero_sock;
3559 struct sockaddr_in6 *dst_addr6 =
3560 (struct sockaddr_in6 *)dst_addr;
3561
3562 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
3563 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
3564 id->route.addr.dev_addr.bound_dev_if =
3565 dst_addr6->sin6_scope_id;
3566 } else if (dst_addr->sa_family == AF_IB) {
3567 ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
3568 ((struct sockaddr_ib *)dst_addr)->sib_pkey;
3569 }
3570 return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
3571}
3572
3573/*
3574 * If required, resolve the source address for bind and leave the id_priv in
3575 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
3576 * calls made by the ULP; a previously bound ID will not be re-bound, and
3577 * src_addr is ignored.
3578 */
3579static int resolve_prepare_src(struct rdma_id_private *id_priv,
3580 struct sockaddr *src_addr,
3581 const struct sockaddr *dst_addr)
3582{
3583 int ret;
3584
3585 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
3586 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
3587		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
3588 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
3589 if (ret)
3590 goto err_dst;
3591 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
3592 RDMA_CM_ADDR_QUERY))) {
3593 ret = -EINVAL;
3594 goto err_dst;
3595 }
3596 }
3597
3598 if (cma_family(id_priv) != dst_addr->sa_family) {
3599 ret = -EINVAL;
3600 goto err_state;
3601 }
3602 return 0;
3603
3604err_state:
3605 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
3606err_dst:
3607 memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
3608 return ret;
3609}
3610
3611int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
3612 const struct sockaddr *dst_addr, unsigned long timeout_ms)
3613{
3614 struct rdma_id_private *id_priv =
3615 container_of(id, struct rdma_id_private, id);
3616 int ret;
3617
3618 ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
3619 if (ret)
3620 return ret;
3621
3622 if (cma_any_addr(dst_addr)) {
3623 ret = cma_resolve_loopback(id_priv);
3624 } else {
3625 if (dst_addr->sa_family == AF_IB) {
3626 ret = cma_resolve_ib_addr(id_priv);
3627 } else {
3628 /*
3629			 * The FSM can return to RDMA_CM_ADDR_BOUND after
3630			 * rdma_resolve_ip() is called, e.g. through the error
3631			 * path in addr_handler(). If this happens, the existing
3632			 * request must be canceled before issuing a new one.
3633			 * Since canceling a request is a bit slow and this
3634			 * oddball path is rare, keep track of whether a request
3635			 * has been issued. The flag ends up permanently set,
3636			 * since this is the only place a cancel is needed and it
3637			 * happens immediately before rdma_resolve_ip().
3638 */
3639 if (id_priv->used_resolve_ip)
3640 rdma_addr_cancel(&id->route.addr.dev_addr);
3641 else
3642 id_priv->used_resolve_ip = 1;
3643 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
3644 &id->route.addr.dev_addr,
3645 timeout_ms, addr_handler,
3646 false, id_priv);
3647 }
3648 }
3649 if (ret)
3650 goto err;
3651
3652 return 0;
3653err:
3654 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
3655 return ret;
3656}
3657EXPORT_SYMBOL(rdma_resolve_addr);
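
/*
 * Active-side usage sketch (illustrative only; the event handler
 * "my_cm_handler", the context "my_ctx" and the destination address are
 * hypothetical):
 *
 *	id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
 *
 * The handler is then invoked with RDMA_CM_EVENT_ADDR_RESOLVED (or
 * RDMA_CM_EVENT_ADDR_ERROR), after which the ULP typically calls
 * rdma_resolve_route() and, from the ROUTE_RESOLVED event, rdma_connect().
 */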
3658
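/*
 * rdma_set_reuseaddr() - Allow or disallow local address reuse for this ID.
 * @id: Communication identifier.
 * @reuse: Non-zero to allow the bound address/port to be shared with other
 *	   IDs that have also requested reuse (see cma_check_port()).
 *
 * Reuse may be enabled in any state except RDMA_CM_LISTEN and disabled only
 * while the ID is idle. A successful rdma_listen() clears the flag again,
 * since a listening port has to be unique in its bind list.
 */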
3659int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3660{
3661 struct rdma_id_private *id_priv;
3662 unsigned long flags;
3663 int ret;
3664
3665 id_priv = container_of(id, struct rdma_id_private, id);
3666 spin_lock_irqsave(&id_priv->lock, flags);
3667 if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
3668 id_priv->state == RDMA_CM_IDLE) {
3669 id_priv->reuseaddr = reuse;
3670 ret = 0;
3671 } else {
3672 ret = -EINVAL;
3673 }
3674 spin_unlock_irqrestore(&id_priv->lock, flags);
3675 return ret;
3676}
3677EXPORT_SYMBOL(rdma_set_reuseaddr);
3678
3679int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
3680{
3681 struct rdma_id_private *id_priv;
3682 unsigned long flags;
3683 int ret;
3684
3685 id_priv = container_of(id, struct rdma_id_private, id);
3686 spin_lock_irqsave(&id_priv->lock, flags);
3687 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
3688 id_priv->options |= (1 << CMA_OPTION_AFONLY);
3689 id_priv->afonly = afonly;
3690 ret = 0;
3691 } else {
3692 ret = -EINVAL;
3693 }
3694 spin_unlock_irqrestore(&id_priv->lock, flags);
3695 return ret;
3696}
3697EXPORT_SYMBOL(rdma_set_afonly);
3698
3699static void cma_bind_port(struct rdma_bind_list *bind_list,
3700 struct rdma_id_private *id_priv)
3701{
3702 struct sockaddr *addr;
3703 struct sockaddr_ib *sib;
3704 u64 sid, mask;
3705 __be16 port;
3706
3707 lockdep_assert_held(&lock);
3708
3709 addr = cma_src_addr(id_priv);
3710 port = htons(bind_list->port);
3711
3712 switch (addr->sa_family) {
3713 case AF_INET:
3714 ((struct sockaddr_in *) addr)->sin_port = port;
3715 break;
3716 case AF_INET6:
3717 ((struct sockaddr_in6 *) addr)->sin6_port = port;
3718 break;
3719 case AF_IB:
3720 sib = (struct sockaddr_ib *) addr;
3721 sid = be64_to_cpu(sib->sib_sid);
3722 mask = be64_to_cpu(sib->sib_sid_mask);
3723 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
3724 sib->sib_sid_mask = cpu_to_be64(~0ULL);
3725 break;
3726 }
3727 id_priv->bind_list = bind_list;
3728 hlist_add_head(&id_priv->node, &bind_list->owners);
3729}
3730
3731static int cma_alloc_port(enum rdma_ucm_port_space ps,
3732 struct rdma_id_private *id_priv, unsigned short snum)
3733{
3734 struct rdma_bind_list *bind_list;
3735 int ret;
3736
3737 lockdep_assert_held(&lock);
3738
3739 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
3740 if (!bind_list)
3741 return -ENOMEM;
3742
3743 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3744 snum);
3745 if (ret < 0)
3746 goto err;
3747
3748 bind_list->ps = ps;
3749 bind_list->port = snum;
3750 cma_bind_port(bind_list, id_priv);
3751 return 0;
3752err:
3753 kfree(bind_list);
3754 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
3755}
3756
3757static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3758 struct rdma_id_private *id_priv)
3759{
3760 struct rdma_id_private *cur_id;
3761 struct sockaddr *daddr = cma_dst_addr(id_priv);
3762 struct sockaddr *saddr = cma_src_addr(id_priv);
3763 __be16 dport = cma_port(daddr);
3764
3765 lockdep_assert_held(&lock);
3766
3767 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3768 struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
3769 struct sockaddr *cur_saddr = cma_src_addr(cur_id);
3770 __be16 cur_dport = cma_port(cur_daddr);
3771
3772 if (id_priv == cur_id)
3773 continue;
3774
3775 /* different dest port -> unique */
3776 if (!cma_any_port(daddr) &&
3777 !cma_any_port(cur_daddr) &&
3778 (dport != cur_dport))
3779 continue;
3780
3781 /* different src address -> unique */
3782 if (!cma_any_addr(saddr) &&
3783 !cma_any_addr(cur_saddr) &&
3784 cma_addr_cmp(saddr, cur_saddr))
3785 continue;
3786
3787 /* different dst address -> unique */
3788 if (!cma_any_addr(daddr) &&
3789 !cma_any_addr(cur_daddr) &&
3790 cma_addr_cmp(daddr, cur_daddr))
3791 continue;
3792
3793 return -EADDRNOTAVAIL;
3794 }
3795 return 0;
3796}
3797
3798static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
3799 struct rdma_id_private *id_priv)
3800{
3801 static unsigned int last_used_port;
3802 int low, high, remaining;
3803 unsigned int rover;
3804 struct net *net = id_priv->id.route.addr.dev_addr.net;
3805
3806 lockdep_assert_held(&lock);
3807
3808 inet_get_local_port_range(net, &low, &high);
3809 remaining = (high - low) + 1;
3810 rover = get_random_u32_inclusive(low, remaining + low - 1);
3811retry:
3812 if (last_used_port != rover) {
3813 struct rdma_bind_list *bind_list;
3814 int ret;
3815
3816 bind_list = cma_ps_find(net, ps, (unsigned short)rover);
3817
3818 if (!bind_list) {
3819 ret = cma_alloc_port(ps, id_priv, rover);
3820 } else {
3821 ret = cma_port_is_unique(bind_list, id_priv);
3822 if (!ret)
3823 cma_bind_port(bind_list, id_priv);
3824 }
3825 /*
3826		 * Remember the previously used port number in order to avoid
3827		 * re-using the same port immediately after it is closed.
3828 */
3829 if (!ret)
3830 last_used_port = rover;
3831 if (ret != -EADDRNOTAVAIL)
3832 return ret;
3833 }
3834 if (--remaining) {
3835 rover++;
3836 if ((rover < low) || (rover > high))
3837 rover = low;
3838 goto retry;
3839 }
3840 return -EADDRNOTAVAIL;
3841}
3842
3843/*
3844 * Check that the requested port is available. This is called when trying to
3845 * bind to a specific port, or when trying to listen on a bound port. In
3846 * the latter case, the provided id_priv may already be on the bind_list, but
3847 * we still need to check that it's okay to start listening.
3848 */
3849static int cma_check_port(struct rdma_bind_list *bind_list,
3850 struct rdma_id_private *id_priv, uint8_t reuseaddr)
3851{
3852 struct rdma_id_private *cur_id;
3853 struct sockaddr *addr, *cur_addr;
3854
3855 lockdep_assert_held(&lock);
3856
3857 addr = cma_src_addr(id_priv);
3858 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3859 if (id_priv == cur_id)
3860 continue;
3861
3862 if (reuseaddr && cur_id->reuseaddr)
3863 continue;
3864
3865 cur_addr = cma_src_addr(cur_id);
3866 if (id_priv->afonly && cur_id->afonly &&
3867 (addr->sa_family != cur_addr->sa_family))
3868 continue;
3869
3870 if (cma_any_addr(addr) || cma_any_addr(cur_addr))
3871 return -EADDRNOTAVAIL;
3872
3873 if (!cma_addr_cmp(addr, cur_addr))
3874 return -EADDRINUSE;
3875 }
3876 return 0;
3877}
3878
3879static int cma_use_port(enum rdma_ucm_port_space ps,
3880 struct rdma_id_private *id_priv)
3881{
3882 struct rdma_bind_list *bind_list;
3883 unsigned short snum;
3884 int ret;
3885
3886 lockdep_assert_held(&lock);
3887
3888 snum = ntohs(cma_port(cma_src_addr(id_priv)));
3889 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
3890 return -EACCES;
3891
3892 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3893 if (!bind_list) {
3894 ret = cma_alloc_port(ps, id_priv, snum);
3895 } else {
3896 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
3897 if (!ret)
3898 cma_bind_port(bind_list, id_priv);
3899 }
3900 return ret;
3901}
3902
3903static enum rdma_ucm_port_space
3904cma_select_inet_ps(struct rdma_id_private *id_priv)
3905{
3906 switch (id_priv->id.ps) {
3907 case RDMA_PS_TCP:
3908 case RDMA_PS_UDP:
3909 case RDMA_PS_IPOIB:
3910 case RDMA_PS_IB:
3911 return id_priv->id.ps;
3912 default:
3913
3914 return 0;
3915 }
3916}
3917
3918static enum rdma_ucm_port_space
3919cma_select_ib_ps(struct rdma_id_private *id_priv)
3920{
3921 enum rdma_ucm_port_space ps = 0;
3922 struct sockaddr_ib *sib;
3923 u64 sid_ps, mask, sid;
3924
3925 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
3926 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
3927 sid = be64_to_cpu(sib->sib_sid) & mask;
3928
3929 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
3930 sid_ps = RDMA_IB_IP_PS_IB;
3931 ps = RDMA_PS_IB;
3932 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
3933 (sid == (RDMA_IB_IP_PS_TCP & mask))) {
3934 sid_ps = RDMA_IB_IP_PS_TCP;
3935 ps = RDMA_PS_TCP;
3936 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
3937 (sid == (RDMA_IB_IP_PS_UDP & mask))) {
3938 sid_ps = RDMA_IB_IP_PS_UDP;
3939 ps = RDMA_PS_UDP;
3940 }
3941
3942 if (ps) {
3943 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
3944 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
3945 be64_to_cpu(sib->sib_sid_mask));
3946 }
3947 return ps;
3948}
3949
3950static int cma_get_port(struct rdma_id_private *id_priv)
3951{
3952 enum rdma_ucm_port_space ps;
3953 int ret;
3954
3955 if (cma_family(id_priv) != AF_IB)
3956 ps = cma_select_inet_ps(id_priv);
3957 else
3958 ps = cma_select_ib_ps(id_priv);
3959 if (!ps)
3960 return -EPROTONOSUPPORT;
3961
3962 mutex_lock(&lock);
3963 if (cma_any_port(cma_src_addr(id_priv)))
3964 ret = cma_alloc_any_port(ps, id_priv);
3965 else
3966 ret = cma_use_port(ps, id_priv);
3967 mutex_unlock(&lock);
3968
3969 return ret;
3970}
3971
3972static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
3973 struct sockaddr *addr)
3974{
3975#if IS_ENABLED(CONFIG_IPV6)
3976 struct sockaddr_in6 *sin6;
3977
3978 if (addr->sa_family != AF_INET6)
3979 return 0;
3980
3981 sin6 = (struct sockaddr_in6 *) addr;
3982
3983 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
3984 return 0;
3985
3986 if (!sin6->sin6_scope_id)
3987 return -EINVAL;
3988
3989 dev_addr->bound_dev_if = sin6->sin6_scope_id;
3990#endif
3991 return 0;
3992}
3993
3994int rdma_listen(struct rdma_cm_id *id, int backlog)
3995{
3996 struct rdma_id_private *id_priv =
3997 container_of(id, struct rdma_id_private, id);
3998 int ret;
3999
4000 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
4001 struct sockaddr_in any_in = {
4002 .sin_family = AF_INET,
4003 .sin_addr.s_addr = htonl(INADDR_ANY),
4004 };
4005
4006		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
4007 ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
4008 if (ret)
4009 return ret;
4010 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
4011 RDMA_CM_LISTEN)))
4012 return -EINVAL;
4013 }
4014
4015 /*
4016	 * Once the ID reaches RDMA_CM_LISTEN it may no longer be reusable
4017	 * and has to be unique in the bind list.
4018 */
4019 if (id_priv->reuseaddr) {
4020 mutex_lock(&lock);
4021 ret = cma_check_port(id_priv->bind_list, id_priv, 0);
4022 if (!ret)
4023 id_priv->reuseaddr = 0;
4024 mutex_unlock(&lock);
4025 if (ret)
4026 goto err;
4027 }
4028
4029 id_priv->backlog = backlog;
4030 if (id_priv->cma_dev) {
4031 if (rdma_cap_ib_cm(id->device, 1)) {
4032 ret = cma_ib_listen(id_priv);
4033 if (ret)
4034 goto err;
4035 } else if (rdma_cap_iw_cm(id->device, 1)) {
4036 ret = cma_iw_listen(id_priv, backlog);
4037 if (ret)
4038 goto err;
4039 } else {
4040 ret = -ENOSYS;
4041 goto err;
4042 }
4043 } else {
4044 ret = cma_listen_on_all(id_priv);
4045 if (ret)
4046 goto err;
4047 }
4048
4049 return 0;
4050err:
4051 id_priv->backlog = 0;
4052 /*
4053	 * None of the failure paths that lead here allow the req_handlers
4054	 * to have run.
4055 */
4056 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
4057 return ret;
4058}
4059EXPORT_SYMBOL(rdma_listen);
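
/*
 * Passive-side usage sketch (illustrative only; "my_cm_handler", "my_ctx"
 * and the bind address are hypothetical):
 *
 *	id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	ret = rdma_bind_addr(id, (struct sockaddr *)&src);
 *	if (!ret)
 *		ret = rdma_listen(id, 16);
 *
 * Incoming requests are then reported to the handler as
 * RDMA_CM_EVENT_CONNECT_REQUEST events carrying a new child rdma_cm_id.
 */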
4060
4061int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
4062{
4063 struct rdma_id_private *id_priv;
4064 int ret;
4065 struct sockaddr *daddr;
4066
4067 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
4068 addr->sa_family != AF_IB)
4069 return -EAFNOSUPPORT;
4070
4071 id_priv = container_of(id, struct rdma_id_private, id);
4072 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
4073 return -EINVAL;
4074
4075 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
4076 if (ret)
4077 goto err1;
4078
4079 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
4080 if (!cma_any_addr(addr)) {
4081 ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
4082 if (ret)
4083 goto err1;
4084
4085 ret = cma_acquire_dev_by_src_ip(id_priv);
4086 if (ret)
4087 goto err1;
4088 }
4089
4090 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
4091 if (addr->sa_family == AF_INET)
4092 id_priv->afonly = 1;
4093#if IS_ENABLED(CONFIG_IPV6)
4094 else if (addr->sa_family == AF_INET6) {
4095 struct net *net = id_priv->id.route.addr.dev_addr.net;
4096
4097 id_priv->afonly = net->ipv6.sysctl.bindv6only;
4098 }
4099#endif
4100 }
4101 daddr = cma_dst_addr(id_priv);
4102 daddr->sa_family = addr->sa_family;
4103
4104 ret = cma_get_port(id_priv);
4105 if (ret)
4106 goto err2;
4107
4108 if (!cma_any_addr(addr))
4109 rdma_restrack_add(&id_priv->res);
4110 return 0;
4111err2:
4112 if (id_priv->cma_dev)
4113 cma_release_dev(id_priv);
4114err1:
4115 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
4116 return ret;
4117}
4118EXPORT_SYMBOL(rdma_bind_addr);
4119
4120static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
4121{
4122 struct cma_hdr *cma_hdr;
4123
4124 cma_hdr = hdr;
4125 cma_hdr->cma_version = CMA_VERSION;
4126 if (cma_family(id_priv) == AF_INET) {
4127 struct sockaddr_in *src4, *dst4;
4128
4129 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
4130 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
4131
4132 cma_set_ip_ver(cma_hdr, 4);
4133 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
4134 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
4135 cma_hdr->port = src4->sin_port;
4136 } else if (cma_family(id_priv) == AF_INET6) {
4137 struct sockaddr_in6 *src6, *dst6;
4138
4139 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
4140 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
4141
4142 cma_set_ip_ver(cma_hdr, 6);
4143 cma_hdr->src_addr.ip6 = src6->sin6_addr;
4144 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
4145 cma_hdr->port = src6->sin6_port;
4146 }
4147 return 0;
4148}
4149
4150static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
4151 const struct ib_cm_event *ib_event)
4152{
4153 struct rdma_id_private *id_priv = cm_id->context;
4154 struct rdma_cm_event event = {};
4155 const struct ib_cm_sidr_rep_event_param *rep =
4156 &ib_event->param.sidr_rep_rcvd;
4157 int ret;
4158
4159 mutex_lock(&id_priv->handler_mutex);
4160 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4161 goto out;
4162
4163 switch (ib_event->event) {
4164 case IB_CM_SIDR_REQ_ERROR:
4165 event.event = RDMA_CM_EVENT_UNREACHABLE;
4166 event.status = -ETIMEDOUT;
4167 break;
4168 case IB_CM_SIDR_REP_RECEIVED:
4169 event.param.ud.private_data = ib_event->private_data;
4170 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
4171 if (rep->status != IB_SIDR_SUCCESS) {
4172 event.event = RDMA_CM_EVENT_UNREACHABLE;
4173 event.status = ib_event->param.sidr_rep_rcvd.status;
4174 pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
4175 event.status);
4176 break;
4177 }
4178 ret = cma_set_qkey(id_priv, rep->qkey);
4179 if (ret) {
4180 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
4181 event.event = RDMA_CM_EVENT_ADDR_ERROR;
4182 event.status = ret;
4183 break;
4184 }
4185 ib_init_ah_attr_from_path(id_priv->id.device,
4186 id_priv->id.port_num,
4187 id_priv->id.route.path_rec,
4188 &event.param.ud.ah_attr,
4189 rep->sgid_attr);
4190 event.param.ud.qp_num = rep->qpn;
4191 event.param.ud.qkey = rep->qkey;
4192 event.event = RDMA_CM_EVENT_ESTABLISHED;
4193 event.status = 0;
4194 break;
4195 default:
4196 pr_err("RDMA CMA: unexpected IB CM event: %d\n",
4197 ib_event->event);
4198 goto out;
4199 }
4200
4201 ret = cma_cm_event_handler(id_priv, &event);
4202
4203 rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4204 if (ret) {
4205 /* Destroy the CM ID by returning a non-zero value. */
4206 id_priv->cm_id.ib = NULL;
4207 destroy_id_handler_unlock(id_priv);
4208 return ret;
4209 }
4210out:
4211 mutex_unlock(&id_priv->handler_mutex);
4212 return 0;
4213}
4214
4215static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
4216 struct rdma_conn_param *conn_param)
4217{
4218 struct ib_cm_sidr_req_param req;
4219 struct ib_cm_id *id;
4220 void *private_data;
4221 u8 offset;
4222 int ret;
4223
4224 memset(&req, 0, sizeof req);
4225 offset = cma_user_data_offset(id_priv);
4226 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
4227 return -EINVAL;
4228
4229 if (req.private_data_len) {
4230 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4231 if (!private_data)
4232 return -ENOMEM;
4233 } else {
4234 private_data = NULL;
4235 }
4236
4237 if (conn_param->private_data && conn_param->private_data_len)
4238 memcpy(private_data + offset, conn_param->private_data,
4239 conn_param->private_data_len);
4240
4241 if (private_data) {
4242 ret = cma_format_hdr(private_data, id_priv);
4243 if (ret)
4244 goto out;
4245 req.private_data = private_data;
4246 }
4247
4248 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
4249 id_priv);
4250 if (IS_ERR(id)) {
4251 ret = PTR_ERR(id);
4252 goto out;
4253 }
4254 id_priv->cm_id.ib = id;
4255
4256 req.path = id_priv->id.route.path_rec;
4257 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4258 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4259 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
4260 req.max_cm_retries = CMA_MAX_CM_RETRIES;
4261
4262 trace_cm_send_sidr_req(id_priv);
4263 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
4264 if (ret) {
4265 ib_destroy_cm_id(id_priv->cm_id.ib);
4266 id_priv->cm_id.ib = NULL;
4267 }
4268out:
4269 kfree(private_data);
4270 return ret;
4271}
4272
4273static int cma_connect_ib(struct rdma_id_private *id_priv,
4274 struct rdma_conn_param *conn_param)
4275{
4276 struct ib_cm_req_param req;
4277 struct rdma_route *route;
4278 void *private_data;
4279 struct ib_cm_id *id;
4280 u8 offset;
4281 int ret;
4282
4283 memset(&req, 0, sizeof req);
4284 offset = cma_user_data_offset(id_priv);
4285 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
4286 return -EINVAL;
4287
4288 if (req.private_data_len) {
4289 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4290 if (!private_data)
4291 return -ENOMEM;
4292 } else {
4293 private_data = NULL;
4294 }
4295
4296 if (conn_param->private_data && conn_param->private_data_len)
4297 memcpy(private_data + offset, conn_param->private_data,
4298 conn_param->private_data_len);
4299
4300 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
4301 if (IS_ERR(id)) {
4302 ret = PTR_ERR(id);
4303 goto out;
4304 }
4305 id_priv->cm_id.ib = id;
4306
4307 route = &id_priv->id.route;
4308 if (private_data) {
4309 ret = cma_format_hdr(private_data, id_priv);
4310 if (ret)
4311 goto out;
4312 req.private_data = private_data;
4313 }
4314
4315 req.primary_path = &route->path_rec[0];
4316 req.primary_path_inbound = route->path_rec_inbound;
4317 req.primary_path_outbound = route->path_rec_outbound;
4318 if (route->num_pri_alt_paths == 2)
4319 req.alternate_path = &route->path_rec[1];
4320
4321 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4322 /* Alternate path SGID attribute currently unsupported */
4323 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4324 req.qp_num = id_priv->qp_num;
4325 req.qp_type = id_priv->id.qp_type;
4326 req.starting_psn = id_priv->seq_num;
4327 req.responder_resources = conn_param->responder_resources;
4328 req.initiator_depth = conn_param->initiator_depth;
4329 req.flow_control = conn_param->flow_control;
4330 req.retry_count = min_t(u8, 7, conn_param->retry_count);
4331 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4332 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4333 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4334 req.max_cm_retries = CMA_MAX_CM_RETRIES;
4335 req.srq = id_priv->srq ? 1 : 0;
4336 req.ece.vendor_id = id_priv->ece.vendor_id;
4337 req.ece.attr_mod = id_priv->ece.attr_mod;
4338
4339 trace_cm_send_req(id_priv);
4340 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
4341out:
4342 if (ret && !IS_ERR(id)) {
4343 ib_destroy_cm_id(id);
4344 id_priv->cm_id.ib = NULL;
4345 }
4346
4347 kfree(private_data);
4348 return ret;
4349}
4350
4351static int cma_connect_iw(struct rdma_id_private *id_priv,
4352 struct rdma_conn_param *conn_param)
4353{
4354 struct iw_cm_id *cm_id;
4355 int ret;
4356 struct iw_cm_conn_param iw_param;
4357
4358 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
4359 if (IS_ERR(cm_id))
4360 return PTR_ERR(cm_id);
4361
4362 mutex_lock(&id_priv->qp_mutex);
4363 cm_id->tos = id_priv->tos;
4364 cm_id->tos_set = id_priv->tos_set;
4365 mutex_unlock(&id_priv->qp_mutex);
4366
4367 id_priv->cm_id.iw = cm_id;
4368
4369 memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
4370 rdma_addr_size(cma_src_addr(id_priv)));
4371 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
4372 rdma_addr_size(cma_dst_addr(id_priv)));
4373
4374 ret = cma_modify_qp_rtr(id_priv, conn_param);
4375 if (ret)
4376 goto out;
4377
4378 if (conn_param) {
4379 iw_param.ord = conn_param->initiator_depth;
4380 iw_param.ird = conn_param->responder_resources;
4381 iw_param.private_data = conn_param->private_data;
4382 iw_param.private_data_len = conn_param->private_data_len;
4383 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
4384 } else {
4385 memset(&iw_param, 0, sizeof iw_param);
4386 iw_param.qpn = id_priv->qp_num;
4387 }
4388 ret = iw_cm_connect(cm_id, &iw_param);
4389out:
4390 if (ret) {
4391 iw_destroy_cm_id(cm_id);
4392 id_priv->cm_id.iw = NULL;
4393 }
4394 return ret;
4395}
4396
4397/**
4398 * rdma_connect_locked - Initiate an active connection request.
4399 * @id: Connection identifier to connect.
4400 * @conn_param: Connection information used for connected QPs.
4401 *
4402 * Same as rdma_connect() but can only be called from the
4403 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
4404 */
4405int rdma_connect_locked(struct rdma_cm_id *id,
4406 struct rdma_conn_param *conn_param)
4407{
4408 struct rdma_id_private *id_priv =
4409 container_of(id, struct rdma_id_private, id);
4410 int ret;
4411
4412 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
4413 return -EINVAL;
4414
4415 if (!id->qp) {
4416 id_priv->qp_num = conn_param->qp_num;
4417 id_priv->srq = conn_param->srq;
4418 }
4419
4420 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4421 if (id->qp_type == IB_QPT_UD)
4422 ret = cma_resolve_ib_udp(id_priv, conn_param);
4423 else
4424 ret = cma_connect_ib(id_priv, conn_param);
4425 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4426 ret = cma_connect_iw(id_priv, conn_param);
4427 } else {
4428 ret = -ENOSYS;
4429 }
4430 if (ret)
4431 goto err_state;
4432 return 0;
4433err_state:
4434 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
4435 return ret;
4436}
4437EXPORT_SYMBOL(rdma_connect_locked);
4438
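/*
 * Illustrative sketch (not part of this file): a kernel ULP that reacts to
 * RDMA_CM_EVENT_ROUTE_RESOLVED directly in its cm event handler would use
 * rdma_connect_locked(), since the handler_mutex is already held there. The
 * handler name and the conn_param values below are hypothetical.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param param = {};
 *
 *		if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED) {
 *			param.retry_count = 7;
 *			param.rnr_retry_count = 7;
 *			return rdma_connect_locked(id, &param);
 *		}
 *		return 0;
 *	}
 */
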
4439/**
4440 * rdma_connect - Initiate an active connection request.
4441 * @id: Connection identifier to connect.
4442 * @conn_param: Connection information used for connected QPs.
4443 *
4444 * Users must have resolved a route for the rdma_cm_id to connect with, by
4445 * calling rdma_resolve_route(), before calling this routine.
4446 *
4447 * This call either connects to a remote QP or obtains remote QP information
4448 * for unconnected rdma_cm_ids; the actual operation depends on the
4449 * rdma_cm_id's port space.
4450 */
4451int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4452{
4453 struct rdma_id_private *id_priv =
4454 container_of(id, struct rdma_id_private, id);
4455 int ret;
4456
4457 mutex_lock(&id_priv->handler_mutex);
4458 ret = rdma_connect_locked(id, conn_param);
4459 mutex_unlock(&id_priv->handler_mutex);
4460 return ret;
4461}
4462EXPORT_SYMBOL(rdma_connect);
4463
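/*
 * Illustrative sketch (not part of this file): from process context outside
 * of the event handler (for example a workqueue that runs after the
 * RDMA_CM_EVENT_ROUTE_RESOLVED event was delivered), the plain rdma_connect()
 * entry point is used instead; it takes the handler_mutex itself. The
 * conn_param values are hypothetical.
 *
 *	struct rdma_conn_param param = {};
 *	int ret;
 *
 *	param.responder_resources = 1;
 *	param.initiator_depth = 1;
 *	param.retry_count = 7;
 *	param.rnr_retry_count = 7;
 *	ret = rdma_connect(id, &param);
 *	if (ret)
 *		pr_err("rdma_connect failed: %d\n", ret);
 */
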
4464/**
4465 * rdma_connect_ece - Initiate an active connection request with ECE data.
4466 * @id: Connection identifier to connect.
4467 * @conn_param: Connection information used for connected QPs.
4468 * @ece: ECE (enhanced connection establishment) parameters
4469 *
4470 * See rdma_connect() explanation.
4471 */
4472int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4473 struct rdma_ucm_ece *ece)
4474{
4475 struct rdma_id_private *id_priv =
4476 container_of(id, struct rdma_id_private, id);
4477
4478 id_priv->ece.vendor_id = ece->vendor_id;
4479 id_priv->ece.attr_mod = ece->attr_mod;
4480
4481 return rdma_connect(id, conn_param);
4482}
4483EXPORT_SYMBOL(rdma_connect_ece);
4484
4485static int cma_accept_ib(struct rdma_id_private *id_priv,
4486 struct rdma_conn_param *conn_param)
4487{
4488 struct ib_cm_rep_param rep;
4489 int ret;
4490
4491 ret = cma_modify_qp_rtr(id_priv, conn_param);
4492 if (ret)
4493 goto out;
4494
4495 ret = cma_modify_qp_rts(id_priv, conn_param);
4496 if (ret)
4497 goto out;
4498
4499 memset(&rep, 0, sizeof rep);
4500 rep.qp_num = id_priv->qp_num;
4501 rep.starting_psn = id_priv->seq_num;
4502 rep.private_data = conn_param->private_data;
4503 rep.private_data_len = conn_param->private_data_len;
4504 rep.responder_resources = conn_param->responder_resources;
4505 rep.initiator_depth = conn_param->initiator_depth;
4506 rep.failover_accepted = 0;
4507 rep.flow_control = conn_param->flow_control;
4508 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4509 rep.srq = id_priv->srq ? 1 : 0;
4510 rep.ece.vendor_id = id_priv->ece.vendor_id;
4511 rep.ece.attr_mod = id_priv->ece.attr_mod;
4512
4513 trace_cm_send_rep(id_priv);
4514 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
4515out:
4516 return ret;
4517}
4518
4519static int cma_accept_iw(struct rdma_id_private *id_priv,
4520 struct rdma_conn_param *conn_param)
4521{
4522 struct iw_cm_conn_param iw_param;
4523 int ret;
4524
4525 if (!conn_param)
4526 return -EINVAL;
4527
4528 ret = cma_modify_qp_rtr(id_priv, conn_param);
4529 if (ret)
4530 return ret;
4531
4532 iw_param.ord = conn_param->initiator_depth;
4533 iw_param.ird = conn_param->responder_resources;
4534 iw_param.private_data = conn_param->private_data;
4535 iw_param.private_data_len = conn_param->private_data_len;
4536 if (id_priv->id.qp)
4537 iw_param.qpn = id_priv->qp_num;
4538 else
4539 iw_param.qpn = conn_param->qp_num;
4540
4541 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
4542}
4543
4544static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
4545 enum ib_cm_sidr_status status, u32 qkey,
4546 const void *private_data, int private_data_len)
4547{
4548 struct ib_cm_sidr_rep_param rep;
4549 int ret;
4550
4551 memset(&rep, 0, sizeof rep);
4552 rep.status = status;
4553 if (status == IB_SIDR_SUCCESS) {
4554 ret = cma_set_qkey(id_priv, qkey);
4555 if (ret)
4556 return ret;
4557 rep.qp_num = id_priv->qp_num;
4558 rep.qkey = id_priv->qkey;
4559
4560 rep.ece.vendor_id = id_priv->ece.vendor_id;
4561 rep.ece.attr_mod = id_priv->ece.attr_mod;
4562 }
4563
4564 rep.private_data = private_data;
4565 rep.private_data_len = private_data_len;
4566
4567 trace_cm_send_sidr_rep(id_priv);
4568 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
4569}
4570
4571/**
4572 * rdma_accept - Called to accept a connection request or response.
4573 * @id: Connection identifier associated with the request.
4574 * @conn_param: Information needed to establish the connection. This must be
4575 * provided if accepting a connection request. If accepting a connection
4576 * response, this parameter must be NULL.
4577 *
4578 * Typically, this routine is only called by the listener to accept a connection
4579 * request. It must also be called on the active side of a connection if the
4580 * user is performing their own QP transitions.
4581 *
4582 * In case of error, a reject message is sent to the remote side and the
4583 * state of the QP associated with the id is moved to error, so that any
4584 * previously posted receive buffers are flushed.
4585 *
4586 * This function is for use by kernel ULPs and must be called from under the
4587 * handler callback.
4588 */
4589int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4590{
4591 struct rdma_id_private *id_priv =
4592 container_of(id, struct rdma_id_private, id);
4593 int ret;
4594
4595 lockdep_assert_held(&id_priv->handler_mutex);
4596
4597 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4598 return -EINVAL;
4599
4600 if (!id->qp && conn_param) {
4601 id_priv->qp_num = conn_param->qp_num;
4602 id_priv->srq = conn_param->srq;
4603 }
4604
4605 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4606 if (id->qp_type == IB_QPT_UD) {
4607 if (conn_param)
4608 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4609 conn_param->qkey,
4610 conn_param->private_data,
4611 conn_param->private_data_len);
4612 else
4613 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4614 0, NULL, 0);
4615 } else {
4616 if (conn_param)
4617 ret = cma_accept_ib(id_priv, conn_param);
4618 else
4619 ret = cma_rep_recv(id_priv);
4620 }
4621 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4622 ret = cma_accept_iw(id_priv, conn_param);
4623 } else {
4624 ret = -ENOSYS;
4625 }
4626 if (ret)
4627 goto reject;
4628
4629 return 0;
4630reject:
4631 cma_modify_qp_err(id_priv);
4632 rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
4633 return ret;
4634}
4635EXPORT_SYMBOL(rdma_accept);
4636
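/*
 * Illustrative sketch (not part of this file): a listening ULP typically
 * accepts straight from its RDMA_CM_EVENT_CONNECT_REQUEST handler, where the
 * handler_mutex is already held, after creating a QP on the new child id.
 * The handler name, QP setup and conn_param values are hypothetical.
 *
 *	static int my_listen_handler(struct rdma_cm_id *id,
 *				     struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param param = {};
 *
 *		if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return 0;
 *
 *		... allocate a PD/CQ and call rdma_create_qp(id, pd, &attr) ...
 *
 *		param.responder_resources = 1;
 *		param.initiator_depth = 1;
 *		return rdma_accept(id, &param);
 *	}
 */
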
4637int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4638 struct rdma_ucm_ece *ece)
4639{
4640 struct rdma_id_private *id_priv =
4641 container_of(id, struct rdma_id_private, id);
4642
4643 id_priv->ece.vendor_id = ece->vendor_id;
4644 id_priv->ece.attr_mod = ece->attr_mod;
4645
4646 return rdma_accept(id, conn_param);
4647}
4648EXPORT_SYMBOL(rdma_accept_ece);
4649
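/*
 * rdma_lock_handler()/rdma_unlock_handler() take and release the id's
 * handler_mutex so that a ULP can call functions that must run "from under
 * the handler callback" (such as rdma_accept()) from its own context.
 */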
4650void rdma_lock_handler(struct rdma_cm_id *id)
4651{
4652 struct rdma_id_private *id_priv =
4653 container_of(id, struct rdma_id_private, id);
4654
4655 mutex_lock(&id_priv->handler_mutex);
4656}
4657EXPORT_SYMBOL(rdma_lock_handler);
4658
4659void rdma_unlock_handler(struct rdma_cm_id *id)
4660{
4661 struct rdma_id_private *id_priv =
4662 container_of(id, struct rdma_id_private, id);
4663
4664 mutex_unlock(&id_priv->handler_mutex);
4665}
4666EXPORT_SYMBOL(rdma_unlock_handler);
4667
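/*
 * rdma_notify() forwards an asynchronous QP event (for example
 * IB_EVENT_COMM_EST) to the IB CM so that connection establishment can make
 * progress; for non-IB devices it is a no-op.
 */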
4668int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
4669{
4670 struct rdma_id_private *id_priv;
4671 int ret;
4672
4673 id_priv = container_of(id, struct rdma_id_private, id);
4674 if (!id_priv->cm_id.ib)
4675 return -EINVAL;
4676
4677 switch (id->device->node_type) {
4678 case RDMA_NODE_IB_CA:
4679 ret = ib_cm_notify(id_priv->cm_id.ib, event);
4680 break;
4681 default:
4682 ret = 0;
4683 break;
4684 }
4685 return ret;
4686}
4687EXPORT_SYMBOL(rdma_notify);
4688
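/*
 * rdma_reject() rejects a received connection request: a SIDR reply with a
 * reject status for IB UD ids, an IB CM REJ with the given reason for
 * connected IB ids, or iw_cm_reject() for iWarp ids.
 */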
4689int rdma_reject(struct rdma_cm_id *id, const void *private_data,
4690 u8 private_data_len, u8 reason)
4691{
4692 struct rdma_id_private *id_priv;
4693 int ret;
4694
4695 id_priv = container_of(id, struct rdma_id_private, id);
4696 if (!id_priv->cm_id.ib)
4697 return -EINVAL;
4698
4699 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4700 if (id->qp_type == IB_QPT_UD) {
4701 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
4702 private_data, private_data_len);
4703 } else {
4704 trace_cm_send_rej(id_priv);
4705 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
4706 private_data, private_data_len);
4707 }
4708 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4709 ret = iw_cm_reject(id_priv->cm_id.iw,
4710 private_data, private_data_len);
4711 } else {
4712 ret = -ENOSYS;
4713 }
4714
4715 return ret;
4716}
4717EXPORT_SYMBOL(rdma_reject);
4718
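/*
 * rdma_disconnect() tears down an established connection: for IB the QP is
 * moved to the error state and a DREQ (or a DREP, if the remote side has
 * already initiated the disconnect) is sent; iWarp ids use
 * iw_cm_disconnect().
 */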
4719int rdma_disconnect(struct rdma_cm_id *id)
4720{
4721 struct rdma_id_private *id_priv;
4722 int ret;
4723
4724 id_priv = container_of(id, struct rdma_id_private, id);
4725 if (!id_priv->cm_id.ib)
4726 return -EINVAL;
4727
4728 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4729 ret = cma_modify_qp_err(id_priv);
4730 if (ret)
4731 goto out;
4732		/*
		 * Initiate or respond to a disconnect: if the DREQ cannot be
		 * sent, the remote side has already started the teardown, so
		 * answer its DREQ with a DREP instead.
		 */
4733 trace_cm_disconnect(id_priv);
4734 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
4735 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
4736 trace_cm_sent_drep(id_priv);
4737 } else {
4738 trace_cm_sent_dreq(id_priv);
4739 }
4740 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4741 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
4742 } else
4743 ret = -EINVAL;
4744
4745out:
4746 return ret;
4747}
4748EXPORT_SYMBOL(rdma_disconnect);
4749
4750static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
4751 struct ib_sa_multicast *multicast,
4752 struct rdma_cm_event *event,
4753 struct cma_multicast *mc)
4754{
4755 struct rdma_dev_addr *dev_addr;
4756 enum ib_gid_type gid_type;
4757 struct net_device *ndev;
4758
4759 if (!status)
4760 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
4761 else
4762 pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
4763 status);
4764
4765 event->status = status;
4766 event->param.ud.private_data = mc->context;
4767 if (status) {
4768 event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4769 return;
4770 }
4771
4772 dev_addr = &id_priv->id.route.addr.dev_addr;
4773 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4774 gid_type =
4775 id_priv->cma_dev
4776 ->default_gid_type[id_priv->id.port_num -
4777 rdma_start_port(
4778 id_priv->cma_dev->device)];
4779
4780 event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
4781 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
4782 &multicast->rec, ndev, gid_type,
4783 &event->param.ud.ah_attr)) {
4784 event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4785 goto out;
4786 }
4787
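	/* Sends to the group use the well-known IB multicast QPN (0xFFFFFF) */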
4788 event->param.ud.qp_num = 0xFFFFFF;
4789 event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
4790
4791out:
4792 if (ndev)
4793 dev_put(ndev);
4794}
4795
4796static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
4797{
4798 struct cma_multicast *mc = multicast->context;
4799 struct rdma_id_private *id_priv = mc->id_priv;
4800 struct rdma_cm_event event = {};
4801 int ret = 0;
4802
4803 mutex_lock(&id_priv->handler_mutex);
4804 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
4805 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
4806 goto out;
4807
4808 cma_make_mc_event(status, id_priv, multicast, &event, mc);
4809 ret = cma_cm_event_handler(id_priv, &event);
4810 rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4811 WARN_ON(ret);
4812
4813out:
4814 mutex_unlock(&id_priv->handler_mutex);
4815 return 0;
4816}
4817
4818static void cma_set_mgid(struct rdma_id_private *id_priv,
4819 struct sockaddr *addr, union ib_gid *mgid)
4820{
4821 unsigned char mc_map[MAX_ADDR_LEN];
4822 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4823 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
4824 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
4825
4826 if (cma_any_addr(addr)) {
4827 memset(mgid, 0, sizeof *mgid);
4828 } else if ((addr->sa_family == AF_INET6) &&
4829 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
4830 0xFF10A01B)) {
4831 /* IPv6 address is an SA assigned MGID. */
4832 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4833 } else if (addr->sa_family == AF_IB) {
4834 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
4835 } else if (addr->sa_family == AF_INET6) {
4836 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
4837 if (id_priv->id.ps == RDMA_PS_UDP)
4838 mc_map[7] = 0x01; /* Use RDMA CM signature */
4839 *mgid = *(union ib_gid *) (mc_map + 4);
4840 } else {
4841 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
4842 if (id_priv->id.ps == RDMA_PS_UDP)
4843 mc_map[7] = 0x01; /* Use RDMA CM signature */
4844 *mgid = *(union ib_gid *) (mc_map + 4);
4845 }
4846}
4847
4848static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
4849 struct cma_multicast *mc)
4850{
4851 struct ib_sa_mcmember_rec rec;
4852 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4853 ib_sa_comp_mask comp_mask;
4854 int ret;
4855
4856 ib_addr_get_mgid(dev_addr, &rec.mgid);
4857 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4858 &rec.mgid, &rec);
4859 if (ret)
4860 return ret;
4861
4862 ret = cma_set_qkey(id_priv, 0);
4863 if (ret)
4864 return ret;
4865
4866 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
4867 rec.qkey = cpu_to_be32(id_priv->qkey);
4868 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
4869 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
4870 rec.join_state = mc->join_state;
4871
4872 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
4873 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
4874 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
4875 IB_SA_MCMEMBER_REC_FLOW_LABEL |
4876 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
4877
4878 if (id_priv->id.ps == RDMA_PS_IPOIB)
4879 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
4880 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
4881 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
4882 IB_SA_MCMEMBER_REC_MTU |
4883 IB_SA_MCMEMBER_REC_HOP_LIMIT;
4884
4885 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4886 id_priv->id.port_num, &rec, comp_mask,
4887 GFP_KERNEL, cma_ib_mc_handler, mc);
4888 return PTR_ERR_OR_ZERO(mc->sa_mc);
4889}
4890
4891static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
4892 enum ib_gid_type gid_type)
4893{
4894 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
4895 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
4896
4897 if (cma_any_addr(addr)) {
4898 memset(mgid, 0, sizeof *mgid);
4899 } else if (addr->sa_family == AF_INET6) {
4900 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4901 } else {
4902 mgid->raw[0] =
4903 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
4904 mgid->raw[1] =
4905 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
4906 mgid->raw[2] = 0;
4907 mgid->raw[3] = 0;
4908 mgid->raw[4] = 0;
4909 mgid->raw[5] = 0;
4910 mgid->raw[6] = 0;
4911 mgid->raw[7] = 0;
4912 mgid->raw[8] = 0;
4913 mgid->raw[9] = 0;
4914 mgid->raw[10] = 0xff;
4915 mgid->raw[11] = 0xff;
4916 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
4917 }
4918}
4919
4920static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
4921 struct cma_multicast *mc)
4922{
4923 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4924 int err = 0;
4925 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
4926 struct net_device *ndev = NULL;
4927 struct ib_sa_multicast ib;
4928 enum ib_gid_type gid_type;
4929 bool send_only;
4930
4931 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
4932
4933 if (cma_zero_addr(addr))
4934 return -EINVAL;
4935
4936 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4937 rdma_start_port(id_priv->cma_dev->device)];
4938 cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
4939
4940 ib.rec.pkey = cpu_to_be16(0xffff);
4941 if (id_priv->id.ps == RDMA_PS_UDP)
4942 ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
4943
4944 if (dev_addr->bound_dev_if)
4945 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4946 if (!ndev)
4947 return -ENODEV;
4948
4949 ib.rec.rate = iboe_get_rate(ndev);
4950 ib.rec.hop_limit = 1;
4951 ib.rec.mtu = iboe_get_mtu(ndev->mtu);
4952
4953 if (addr->sa_family == AF_INET) {
4954 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
4955 ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
4956 if (!send_only) {
4957 err = cma_igmp_send(ndev, &ib.rec.mgid,
4958 true);
4959 }
4960 }
4961 } else {
4962 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
4963 err = -ENOTSUPP;
4964 }
4965 dev_put(ndev);
4966 if (err || !ib.rec.mtu)
4967 return err ?: -EINVAL;
4968
4969 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
4970 &ib.rec.port_gid);
4971 INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
4972 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
4973 queue_work(cma_wq, &mc->iboe_join.work);
4974 return 0;
4975}
4976
4977int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
4978 u8 join_state, void *context)
4979{
4980 struct rdma_id_private *id_priv =
4981 container_of(id, struct rdma_id_private, id);
4982 struct cma_multicast *mc;
4983 int ret;
4984
4985 /* Not supported for kernel QPs */
4986 if (WARN_ON(id->qp))
4987 return -EINVAL;
4988
4989 /* ULP is calling this wrong. */
4990 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
4991 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
4992 return -EINVAL;
4993
4994 mc = kzalloc(sizeof(*mc), GFP_KERNEL);
4995 if (!mc)
4996 return -ENOMEM;
4997
4998 memcpy(&mc->addr, addr, rdma_addr_size(addr));
4999 mc->context = context;
5000 mc->id_priv = id_priv;
5001 mc->join_state = join_state;
5002
5003 if (rdma_protocol_roce(id->device, id->port_num)) {
5004 ret = cma_iboe_join_multicast(id_priv, mc);
5005 if (ret)
5006 goto out_err;
5007 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
5008 ret = cma_join_ib_multicast(id_priv, mc);
5009 if (ret)
5010 goto out_err;
5011 } else {
5012 ret = -ENOSYS;
5013 goto out_err;
5014 }
5015
5016 spin_lock(&id_priv->lock);
5017 list_add(&mc->list, &id_priv->mc_list);
5018 spin_unlock(&id_priv->lock);
5019
5020 return 0;
5021out_err:
5022 kfree(mc);
5023 return ret;
5024}
5025EXPORT_SYMBOL(rdma_join_multicast);
5026
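/*
 * Illustrative sketch (not part of this file): a UD ULP joins a group once
 * the id has reached RDMA_CM_ADDR_RESOLVED and then picks up the AH, QPN and
 * QKEY from the RDMA_CM_EVENT_MULTICAST_JOIN event. The group address,
 * context pointer and join state below are hypothetical.
 *
 *	struct sockaddr_in maddr = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = cpu_to_be32(0xe0010203),
 *	};
 *	int ret;
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&maddr,
 *				  BIT(FULLMEMBER_JOIN), my_context);
 */
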
5027void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
5028{
5029 struct rdma_id_private *id_priv;
5030 struct cma_multicast *mc;
5031
5032 id_priv = container_of(id, struct rdma_id_private, id);
5033 spin_lock_irq(&id_priv->lock);
5034 list_for_each_entry(mc, &id_priv->mc_list, list) {
5035 if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
5036 continue;
5037 list_del(&mc->list);
5038 spin_unlock_irq(&id_priv->lock);
5039
5040 WARN_ON(id_priv->cma_dev->device != id->device);
5041 destroy_mc(id_priv, mc);
5042 return;
5043 }
5044 spin_unlock_irq(&id_priv->lock);
5045}
5046EXPORT_SYMBOL(rdma_leave_multicast);
5047
5048static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
5049{
5050 struct rdma_dev_addr *dev_addr;
5051 struct cma_work *work;
5052
5053 dev_addr = &id_priv->id.route.addr.dev_addr;
5054
5055 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
5056 (net_eq(dev_net(ndev), dev_addr->net)) &&
5057 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
5058 pr_info("RDMA CM addr change for ndev %s used by id %p\n",
5059 ndev->name, &id_priv->id);
5060 work = kzalloc(sizeof *work, GFP_KERNEL);
5061 if (!work)
5062 return -ENOMEM;
5063
5064 INIT_WORK(&work->work, cma_work_handler);
5065 work->id = id_priv;
5066 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
5067 cma_id_get(id_priv);
5068 queue_work(cma_wq, &work->work);
5069 }
5070
5071 return 0;
5072}
5073
5074static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
5075 void *ptr)
5076{
5077 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5078 struct cma_device *cma_dev;
5079 struct rdma_id_private *id_priv;
5080 int ret = NOTIFY_DONE;
5081
5082 if (event != NETDEV_BONDING_FAILOVER)
5083 return NOTIFY_DONE;
5084
5085 if (!netif_is_bond_master(ndev))
5086 return NOTIFY_DONE;
5087
5088 mutex_lock(&lock);
5089 list_for_each_entry(cma_dev, &dev_list, list)
5090 list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
5091 ret = cma_netdev_change(ndev, id_priv);
5092 if (ret)
5093 goto out;
5094 }
5095
5096out:
5097 mutex_unlock(&lock);
5098 return ret;
5099}
5100
5101static void cma_netevent_work_handler(struct work_struct *_work)
5102{
5103 struct rdma_id_private *id_priv =
5104 container_of(_work, struct rdma_id_private, id.net_work);
5105 struct rdma_cm_event event = {};
5106
5107 mutex_lock(&id_priv->handler_mutex);
5108
5109 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
5110 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
5111 goto out_unlock;
5112
5113 event.event = RDMA_CM_EVENT_UNREACHABLE;
5114 event.status = -ETIMEDOUT;
5115
5116 if (cma_cm_event_handler(id_priv, &event)) {
5117 __acquire(&id_priv->handler_mutex);
5118 id_priv->cm_id.ib = NULL;
5119 cma_id_put(id_priv);
5120 destroy_id_handler_unlock(id_priv);
5121 return;
5122 }
5123
5124out_unlock:
5125 mutex_unlock(&id_priv->handler_mutex);
5126 cma_id_put(id_priv);
5127}
5128
5129static int cma_netevent_callback(struct notifier_block *self,
5130 unsigned long event, void *ctx)
5131{
5132 struct id_table_entry *ips_node = NULL;
5133 struct rdma_id_private *current_id;
5134 struct neighbour *neigh = ctx;
5135 unsigned long flags;
5136
5137 if (event != NETEVENT_NEIGH_UPDATE)
5138 return NOTIFY_DONE;
5139
5140 spin_lock_irqsave(&id_table_lock, flags);
5141 if (neigh->tbl->family == AF_INET6) {
5142 struct sockaddr_in6 neigh_sock_6;
5143
5144 neigh_sock_6.sin6_family = AF_INET6;
5145 neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
5146 ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5147 (struct sockaddr *)&neigh_sock_6);
5148 } else if (neigh->tbl->family == AF_INET) {
5149 struct sockaddr_in neigh_sock_4;
5150
5151 neigh_sock_4.sin_family = AF_INET;
5152 neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
5153 ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5154 (struct sockaddr *)&neigh_sock_4);
5155 } else
5156 goto out;
5157
5158 if (!ips_node)
5159 goto out;
5160
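	/*
	 * Queue an RDMA_CM_EVENT_UNREACHABLE for every id whose cached
	 * destination MAC no longer matches the updated neighbour entry.
	 */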
5161 list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
5162 if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
5163 neigh->ha, ETH_ALEN))
5164 continue;
5165		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
5166 cma_id_get(current_id);
5167		queue_work(cma_wq, &current_id->id.net_work);
5168 }
5169out:
5170 spin_unlock_irqrestore(&id_table_lock, flags);
5171 return NOTIFY_DONE;
5172}
5173
5174static struct notifier_block cma_nb = {
5175 .notifier_call = cma_netdev_callback
5176};
5177
5178static struct notifier_block cma_netevent_cb = {
5179 .notifier_call = cma_netevent_callback
5180};
5181
5182static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
5183{
5184 struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
5185 enum rdma_cm_state state;
5186 unsigned long flags;
5187
5188 mutex_lock(&id_priv->handler_mutex);
5189 /* Record that we want to remove the device */
5190 spin_lock_irqsave(&id_priv->lock, flags);
5191 state = id_priv->state;
5192 if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
5193 spin_unlock_irqrestore(&id_priv->lock, flags);
5194 mutex_unlock(&id_priv->handler_mutex);
5195 cma_id_put(id_priv);
5196 return;
5197 }
5198 id_priv->state = RDMA_CM_DEVICE_REMOVAL;
5199 spin_unlock_irqrestore(&id_priv->lock, flags);
5200
5201 if (cma_cm_event_handler(id_priv, &event)) {
5202 /*
5203 * At this point the ULP promises it won't call
5204 * rdma_destroy_id() concurrently
5205 */
5206 cma_id_put(id_priv);
5207 mutex_unlock(&id_priv->handler_mutex);
5208 trace_cm_id_destroy(id_priv);
5209 _destroy_id(id_priv, state);
5210 return;
5211 }
5212 mutex_unlock(&id_priv->handler_mutex);
5213
5214 /*
5215 * If this races with destroy then the thread that first assigns state
5216 * to a destroying does the cancel.
5217 */
5218 cma_cancel_operation(id_priv, state);
5219 cma_id_put(id_priv);
5220}
5221
5222static void cma_process_remove(struct cma_device *cma_dev)
5223{
5224 mutex_lock(&lock);
5225 while (!list_empty(&cma_dev->id_list)) {
5226 struct rdma_id_private *id_priv = list_first_entry(
5227 &cma_dev->id_list, struct rdma_id_private, device_item);
5228
5229 list_del_init(&id_priv->listen_item);
5230 list_del_init(&id_priv->device_item);
5231 cma_id_get(id_priv);
5232 mutex_unlock(&lock);
5233
5234 cma_send_device_removal_put(id_priv);
5235
5236 mutex_lock(&lock);
5237 }
5238 mutex_unlock(&lock);
5239
5240 cma_dev_put(cma_dev);
5241 wait_for_completion(&cma_dev->comp);
5242}
5243
5244static bool cma_supported(struct ib_device *device)
5245{
5246 u32 i;
5247
5248 rdma_for_each_port(device, i) {
5249 if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
5250 return true;
5251 }
5252 return false;
5253}
5254
5255static int cma_add_one(struct ib_device *device)
5256{
5257 struct rdma_id_private *to_destroy;
5258 struct cma_device *cma_dev;
5259 struct rdma_id_private *id_priv;
5260 unsigned long supported_gids = 0;
5261 int ret;
5262 u32 i;
5263
5264 if (!cma_supported(device))
5265 return -EOPNOTSUPP;
5266
5267 cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
5268 if (!cma_dev)
5269 return -ENOMEM;
5270
5271 cma_dev->device = device;
5272 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
5273 sizeof(*cma_dev->default_gid_type),
5274 GFP_KERNEL);
5275 if (!cma_dev->default_gid_type) {
5276 ret = -ENOMEM;
5277 goto free_cma_dev;
5278 }
5279
5280 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
5281 sizeof(*cma_dev->default_roce_tos),
5282 GFP_KERNEL);
5283 if (!cma_dev->default_roce_tos) {
5284 ret = -ENOMEM;
5285 goto free_gid_type;
5286 }
5287
5288 rdma_for_each_port (device, i) {
5289 supported_gids = roce_gid_type_mask_support(device, i);
5290 WARN_ON(!supported_gids);
5291 if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
5292 cma_dev->default_gid_type[i - rdma_start_port(device)] =
5293 CMA_PREFERRED_ROCE_GID_TYPE;
5294 else
5295 cma_dev->default_gid_type[i - rdma_start_port(device)] =
5296 find_first_bit(&supported_gids, BITS_PER_LONG);
5297 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
5298 }
5299
5300 init_completion(&cma_dev->comp);
5301 refcount_set(&cma_dev->refcount, 1);
5302 INIT_LIST_HEAD(&cma_dev->id_list);
5303 ib_set_client_data(device, &cma_client, cma_dev);
5304
5305 mutex_lock(&lock);
5306 list_add_tail(&cma_dev->list, &dev_list);
5307 list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
5308 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
5309 if (ret)
5310 goto free_listen;
5311 }
5312 mutex_unlock(&lock);
5313
5314 trace_cm_add_one(device);
5315 return 0;
5316
5317free_listen:
5318 list_del(&cma_dev->list);
5319 mutex_unlock(&lock);
5320
5321 /* cma_process_remove() will delete to_destroy */
5322 cma_process_remove(cma_dev);
5323 kfree(cma_dev->default_roce_tos);
5324free_gid_type:
5325 kfree(cma_dev->default_gid_type);
5326
5327free_cma_dev:
5328 kfree(cma_dev);
5329 return ret;
5330}
5331
5332static void cma_remove_one(struct ib_device *device, void *client_data)
5333{
5334 struct cma_device *cma_dev = client_data;
5335
5336 trace_cm_remove_one(device);
5337
5338 mutex_lock(&lock);
5339 list_del(&cma_dev->list);
5340 mutex_unlock(&lock);
5341
5342 cma_process_remove(cma_dev);
5343 kfree(cma_dev->default_roce_tos);
5344 kfree(cma_dev->default_gid_type);
5345 kfree(cma_dev);
5346}
5347
5348static int cma_init_net(struct net *net)
5349{
5350 struct cma_pernet *pernet = cma_pernet(net);
5351
5352 xa_init(&pernet->tcp_ps);
5353 xa_init(&pernet->udp_ps);
5354 xa_init(&pernet->ipoib_ps);
5355 xa_init(&pernet->ib_ps);
5356
5357 return 0;
5358}
5359
5360static void cma_exit_net(struct net *net)
5361{
5362 struct cma_pernet *pernet = cma_pernet(net);
5363
5364 WARN_ON(!xa_empty(&pernet->tcp_ps));
5365 WARN_ON(!xa_empty(&pernet->udp_ps));
5366 WARN_ON(!xa_empty(&pernet->ipoib_ps));
5367 WARN_ON(!xa_empty(&pernet->ib_ps));
5368}
5369
5370static struct pernet_operations cma_pernet_operations = {
5371 .init = cma_init_net,
5372 .exit = cma_exit_net,
5373 .id = &cma_pernet_id,
5374 .size = sizeof(struct cma_pernet),
5375};
5376
5377static int __init cma_init(void)
5378{
5379 int ret;
5380
5381 /*
5382 * There is a rare lock ordering dependency in cma_netdev_callback()
5383	 * that only happens when bonding is enabled. Teach lockdep that the
5384	 * rtnl mutex must never be nested under the cma 'lock' mutex so it can
5385	 * find such inversions without having to test with bonding.
5386 */
5387 if (IS_ENABLED(CONFIG_LOCKDEP)) {
5388 rtnl_lock();
5389 mutex_lock(&lock);
5390 mutex_unlock(&lock);
5391 rtnl_unlock();
5392 }
5393
5394 cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
5395 if (!cma_wq)
5396 return -ENOMEM;
5397
5398 ret = register_pernet_subsys(&cma_pernet_operations);
5399 if (ret)
5400 goto err_wq;
5401
5402 ib_sa_register_client(&sa_client);
5403 register_netdevice_notifier(&cma_nb);
5404 register_netevent_notifier(&cma_netevent_cb);
5405
5406 ret = ib_register_client(&cma_client);
5407 if (ret)
5408 goto err;
5409
5410 ret = cma_configfs_init();
5411 if (ret)
5412 goto err_ib;
5413
5414 return 0;
5415
5416err_ib:
5417 ib_unregister_client(&cma_client);
5418err:
5419 unregister_netevent_notifier(&cma_netevent_cb);
5420 unregister_netdevice_notifier(&cma_nb);
5421 ib_sa_unregister_client(&sa_client);
5422 unregister_pernet_subsys(&cma_pernet_operations);
5423err_wq:
5424 destroy_workqueue(cma_wq);
5425 return ret;
5426}
5427
5428static void __exit cma_cleanup(void)
5429{
5430 cma_configfs_exit();
5431 ib_unregister_client(&cma_client);
5432 unregister_netevent_notifier(&cma_netevent_cb);
5433 unregister_netdevice_notifier(&cma_nb);
5434 ib_sa_unregister_client(&sa_client);
5435 unregister_pernet_subsys(&cma_pernet_operations);
5436 destroy_workqueue(cma_wq);
5437}
5438
5439module_init(cma_init);
5440module_exit(cma_cleanup);