   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   5 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
   6 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   7 * Copyright (c) 2019, Mellanox Technologies inc.  All rights reserved.
   8 */
   9
  10#include <linux/completion.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/device.h>
  13#include <linux/module.h>
  14#include <linux/err.h>
  15#include <linux/idr.h>
  16#include <linux/interrupt.h>
  17#include <linux/random.h>
  18#include <linux/rbtree.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/sysfs.h>
  22#include <linux/workqueue.h>
  23#include <linux/kdev_t.h>
  24#include <linux/etherdevice.h>
  25
  26#include <rdma/ib_cache.h>
  27#include <rdma/ib_cm.h>
  28#include <rdma/ib_sysfs.h>
  29#include "cm_msgs.h"
  30#include "core_priv.h"
  31#include "cm_trace.h"
  32
  33MODULE_AUTHOR("Sean Hefty");
  34MODULE_DESCRIPTION("InfiniBand CM");
  35MODULE_LICENSE("Dual BSD/GPL");
  36
  37static const char * const ibcm_rej_reason_strs[] = {
  38	[IB_CM_REJ_NO_QP]			= "no QP",
  39	[IB_CM_REJ_NO_EEC]			= "no EEC",
  40	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
  41	[IB_CM_REJ_TIMEOUT]			= "timeout",
  42	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
  43	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
  44	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
  45	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
  46	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
  47	[IB_CM_REJ_STALE_CONN]			= "stale conn",
  48	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
  49	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
  50	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
  51	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
  52	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
  53	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
  54	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
  55	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
  56	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
  57	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
  58	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
  59	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
  60	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
  61	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
  62	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
  63	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
  64	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
  65	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
  66	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
  67	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
  68	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
  69	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
  70	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
  71	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
  72		"vendor option is not supported",
  73};
  74
  75const char *__attribute_const__ ibcm_reject_msg(int reason)
  76{
  77	size_t index = reason;
  78
  79	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
  80	    ibcm_rej_reason_strs[index])
  81		return ibcm_rej_reason_strs[index];
  82	else
  83		return "unrecognized reason";
  84}
  85EXPORT_SYMBOL(ibcm_reject_msg);
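/*
 * Hedged usage sketch (not part of the original file): a CM consumer
 * handling an IB_CM_REJ_RECEIVED event can translate the wire reason code
 * into a readable string for logging. The event layout is from ib_cm.h;
 * the surrounding function is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void example_log_rej(const struct ib_cm_event *event)
{
	pr_debug("connection rejected: %s\n",
		 ibcm_reject_msg(event->param.rej_rcvd.reason));
}
#endif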
  86
  87struct cm_id_private;
  88struct cm_work;
  89static int cm_add_one(struct ib_device *device);
  90static void cm_remove_one(struct ib_device *device, void *client_data);
  91static void cm_process_work(struct cm_id_private *cm_id_priv,
  92			    struct cm_work *work);
  93static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
  94				   struct ib_cm_sidr_rep_param *param);
  95static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
  96			       const void *private_data, u8 private_data_len);
  97static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
  98			       void *private_data, u8 private_data_len);
  99static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 100			      enum ib_cm_rej_reason reason, void *ari,
 101			      u8 ari_length, const void *private_data,
 102			      u8 private_data_len);
 103
 104static struct ib_client cm_client = {
 105	.name   = "cm",
 106	.add    = cm_add_one,
 107	.remove = cm_remove_one
 108};
 109
 110static struct ib_cm {
 111	spinlock_t lock;
 112	struct list_head device_list;
 113	rwlock_t device_lock;
 114	struct rb_root listen_service_table;
 115	u64 listen_service_id;
 116	/* struct rb_root peer_service_table; todo: fix peer to peer */
 117	struct rb_root remote_qp_table;
 118	struct rb_root remote_id_table;
 119	struct rb_root remote_sidr_table;
 120	struct xarray local_id_table;
 121	u32 local_id_next;
 122	__be32 random_id_operand;
 123	struct list_head timewait_list;
 124	struct workqueue_struct *wq;
 125} cm;
 126
 127/* Counter indexes ordered by attribute ID */
 128enum {
 129	CM_REQ_COUNTER,
 130	CM_MRA_COUNTER,
 131	CM_REJ_COUNTER,
 132	CM_REP_COUNTER,
 133	CM_RTU_COUNTER,
 134	CM_DREQ_COUNTER,
 135	CM_DREP_COUNTER,
 136	CM_SIDR_REQ_COUNTER,
 137	CM_SIDR_REP_COUNTER,
 138	CM_LAP_COUNTER,
 139	CM_APR_COUNTER,
 140	CM_ATTR_COUNT,
 141	CM_ATTR_ID_OFFSET = 0x0010,
 142};
 143
 144enum {
 145	CM_XMIT,
 146	CM_XMIT_RETRIES,
 147	CM_RECV,
 148	CM_RECV_DUPLICATES,
 149	CM_COUNTER_GROUPS
 150};
 151
 152struct cm_counter_attribute {
 153	struct ib_port_attribute attr;
 154	unsigned short group;
 155	unsigned short index;
 156};
 157
 158struct cm_port {
 159	struct cm_device *cm_dev;
 160	struct ib_mad_agent *mad_agent;
 161	u32 port_num;
 162	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
 163};
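/*
 * Hedged sketch of how the counters array above is used: one cell per
 * (traffic group, attribute) pair. The index names are real; the
 * statement below is an illustrative assumption, not a quote of the
 * actual send/receive paths.
 */
#if 0	/* illustrative only, not compiled */
	/* e.g. on receiving a REQ MAD on this port: */
	atomic_long_inc(&port->counters[CM_RECV][CM_REQ_COUNTER]);
#endif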
 164
 165struct cm_device {
 166	struct kref kref;
 167	struct list_head list;
 168	spinlock_t mad_agent_lock;
 169	struct ib_device *ib_device;
 170	u8 ack_delay;
 171	int going_down;
 172	struct cm_port *port[];
 173};
 174
 175struct cm_av {
 176	struct cm_port *port;
 177	struct rdma_ah_attr ah_attr;
 178	u16 dlid_datapath;
 179	u16 pkey_index;
 180	u8 timeout;
 181};
 182
 183struct cm_work {
 184	struct delayed_work work;
 185	struct list_head list;
 186	struct cm_port *port;
 187	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
 188	__be32 local_id;			/* Established / timewait */
 189	__be32 remote_id;
 190	struct ib_cm_event cm_event;
 191	struct sa_path_rec path[];
 192};
 193
 194struct cm_timewait_info {
 195	struct cm_work work;
 196	struct list_head list;
 197	struct rb_node remote_qp_node;
 198	struct rb_node remote_id_node;
 199	__be64 remote_ca_guid;
 200	__be32 remote_qpn;
 201	u8 inserted_remote_qp;
 202	u8 inserted_remote_id;
 203};
 204
 205struct cm_id_private {
 206	struct ib_cm_id	id;
 207
 208	struct rb_node service_node;
 209	struct rb_node sidr_id_node;
 210	u32 sidr_slid;
 211	spinlock_t lock;	/* Do not acquire inside cm.lock */
 212	struct completion comp;
 213	refcount_t refcount;
 214	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
 215	 * Protected by the cm.lock spinlock.
 216	 */
 217	int listen_sharecount;
 218	struct rcu_head rcu;
 219
 220	struct ib_mad_send_buf *msg;
 221	struct cm_timewait_info *timewait_info;
 222	/* todo: use alternate port on send failure */
 223	struct cm_av av;
 224	struct cm_av alt_av;
 225
 226	void *private_data;
 227	__be64 tid;
 228	__be32 local_qpn;
 229	__be32 remote_qpn;
 230	enum ib_qp_type qp_type;
 231	__be32 sq_psn;
 232	__be32 rq_psn;
 233	int timeout_ms;
 234	enum ib_mtu path_mtu;
 235	__be16 pkey;
 236	u8 private_data_len;
 237	u8 max_cm_retries;
 238	u8 responder_resources;
 239	u8 initiator_depth;
 240	u8 retry_count;
 241	u8 rnr_retry_count;
 242	u8 service_timeout;
 243	u8 target_ack_delay;
 244
 245	struct list_head work_list;
 246	atomic_t work_count;
 247
 248	struct rdma_ucm_ece ece;
 249};
 250
 251static void cm_dev_release(struct kref *kref)
 252{
 253	struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
 254	u32 i;
 255
 256	rdma_for_each_port(cm_dev->ib_device, i)
 257		kfree(cm_dev->port[i - 1]);
 258
 259	kfree(cm_dev);
 260}
 261
 262static void cm_device_put(struct cm_device *cm_dev)
 263{
 264	kref_put(&cm_dev->kref, cm_dev_release);
 265}
 266
 267static void cm_work_handler(struct work_struct *work);
 268
 269static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 270{
 271	if (refcount_dec_and_test(&cm_id_priv->refcount))
 272		complete(&cm_id_priv->comp);
 273}
 274
 275static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
 276{
 277	struct ib_mad_agent *mad_agent;
 278	struct ib_mad_send_buf *m;
 279	struct ib_ah *ah;
 280
 281	lockdep_assert_held(&cm_id_priv->lock);
 282
 283	if (!cm_id_priv->av.port)
 284		return ERR_PTR(-EINVAL);
 285
 286	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
 287	mad_agent = cm_id_priv->av.port->mad_agent;
 288	if (!mad_agent) {
 289		m = ERR_PTR(-EINVAL);
 290		goto out;
 291	}
 292
 293	ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
 294	if (IS_ERR(ah)) {
 295		m = ERR_CAST(ah);
 296		goto out;
 297	}
 298
 299	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
 300			       cm_id_priv->av.pkey_index,
 301			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 302			       GFP_ATOMIC,
 303			       IB_MGMT_BASE_VERSION);
 304	if (IS_ERR(m)) {
 305		rdma_destroy_ah(ah, 0);
 306		goto out;
 307	}
 308
 309	/* Timeout set by caller if response is expected. */
 310	m->ah = ah;
 311	m->retries = cm_id_priv->max_cm_retries;
 312
 313	refcount_inc(&cm_id_priv->refcount);
 314	m->context[0] = cm_id_priv;
 315
 316out:
 317	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
 318	return m;
 319}
 320
 321static void cm_free_msg(struct ib_mad_send_buf *msg)
 322{
 323	struct cm_id_private *cm_id_priv = msg->context[0];
 324
 325	if (msg->ah)
 326		rdma_destroy_ah(msg->ah, 0);
 327	cm_deref_id(cm_id_priv);
 328	ib_free_send_mad(msg);
 329}
 330
 331static struct ib_mad_send_buf *
 332cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
 333{
 334	struct ib_mad_send_buf *msg;
 335
 336	lockdep_assert_held(&cm_id_priv->lock);
 337
 338	msg = cm_alloc_msg(cm_id_priv);
 339	if (IS_ERR(msg))
 340		return msg;
 341	cm_id_priv->msg = msg;
 342	return msg;
 343}
 344
 345static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
 346{
 347	struct cm_id_private *cm_id_priv = msg->context[0];
 348
 349	lockdep_assert_held(&cm_id_priv->lock);
 350
 351	if (!WARN_ON(cm_id_priv->msg != msg))
 352		cm_id_priv->msg = NULL;
 353
 354	if (msg->ah)
 355		rdma_destroy_ah(msg->ah, 0);
 356	cm_deref_id(cm_id_priv);
 357	ib_free_send_mad(msg);
 358}
 359
 360static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
 361							   struct ib_mad_recv_wc *mad_recv_wc)
 362{
 363	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
 364				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 365				  GFP_ATOMIC,
 366				  IB_MGMT_BASE_VERSION);
 367}
 368
 369static int cm_create_response_msg_ah(struct cm_port *port,
 370				     struct ib_mad_recv_wc *mad_recv_wc,
 371				     struct ib_mad_send_buf *msg)
 372{
 373	struct ib_ah *ah;
 374
 375	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
 376				  mad_recv_wc->recv_buf.grh, port->port_num);
 377	if (IS_ERR(ah))
 378		return PTR_ERR(ah);
 379
 380	msg->ah = ah;
 381	return 0;
 382}
 383
 384static int cm_alloc_response_msg(struct cm_port *port,
 385				 struct ib_mad_recv_wc *mad_recv_wc,
 386				 struct ib_mad_send_buf **msg)
 387{
 388	struct ib_mad_send_buf *m;
 389	int ret;
 390
 391	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
 392	if (IS_ERR(m))
 393		return PTR_ERR(m);
 394
 395	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
 396	if (ret) {
 397		ib_free_send_mad(m);
 398		return ret;
 399	}
 400
 401	*msg = m;
 402	return 0;
 403}
 404
 405static void cm_free_response_msg(struct ib_mad_send_buf *msg)
 406{
 407	if (msg->ah)
 408		rdma_destroy_ah(msg->ah, 0);
 409	ib_free_send_mad(msg);
 410}
 411
 412static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
 413{
 414	void *data;
 415
 416	if (!private_data || !private_data_len)
 417		return NULL;
 418
 419	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 420	if (!data)
 421		return ERR_PTR(-ENOMEM);
 422
 423	return data;
 424}
 425
 426static void cm_set_private_data(struct cm_id_private *cm_id_priv,
 427				 void *private_data, u8 private_data_len)
 428{
 429	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
 430		kfree(cm_id_priv->private_data);
 431
 432	cm_id_priv->private_data = private_data;
 433	cm_id_priv->private_data_len = private_data_len;
 434}
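/*
 * Hedged usage sketch for the two helpers above: cm_copy_private_data()
 * distinguishes "no data" (NULL) from allocation failure (ERR_PTR), so
 * callers must test with IS_ERR() rather than a NULL check before handing
 * the copy to cm_set_private_data(). Variable names are illustrative.
 */
#if 0	/* illustrative only, not compiled */
	void *data = cm_copy_private_data(private_data, private_data_len);

	if (IS_ERR(data))
		return PTR_ERR(data);
	cm_set_private_data(cm_id_priv, data, private_data_len);
#endif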
 435
 436static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
 437{
 438	struct cm_port *old_port = av->port;
 439
 440	if (old_port == port)
 441		return;
 442
 443	av->port = port;
 444	if (old_port)
 445		cm_device_put(old_port->cm_dev);
 446	if (port)
 447		kref_get(&port->cm_dev->kref);
 448}
 449
 450static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
 451			       struct rdma_ah_attr *ah_attr, struct cm_av *av)
 452{
 453	cm_set_av_port(av, port);
 454	av->pkey_index = wc->pkey_index;
 455	rdma_move_ah_attr(&av->ah_attr, ah_attr);
 456}
 457
 458static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 459				   struct ib_grh *grh, struct cm_av *av)
 460{
 461	cm_set_av_port(av, port);
 462	av->pkey_index = wc->pkey_index;
 463	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
 464				       port->port_num, wc,
 465				       grh, &av->ah_attr);
 466}
 467
 468static struct cm_port *
 469get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
 470{
 471	struct cm_device *cm_dev;
 472	struct cm_port *port = NULL;
 473	unsigned long flags;
 474
 475	if (attr) {
 476		read_lock_irqsave(&cm.device_lock, flags);
 477		list_for_each_entry(cm_dev, &cm.device_list, list) {
 478			if (cm_dev->ib_device == attr->device) {
 479				port = cm_dev->port[attr->port_num - 1];
 480				break;
 481			}
 482		}
 483		read_unlock_irqrestore(&cm.device_lock, flags);
 484	} else {
 485		/* The SGID attribute can be NULL under the following
 486		 * conditions:
 487		 * (a) Alternate path
 488		 * (b) IB link layer without GRH
 489		 * (c) LAP send messages
 490		 */
 491		read_lock_irqsave(&cm.device_lock, flags);
 492		list_for_each_entry(cm_dev, &cm.device_list, list) {
 493			attr = rdma_find_gid(cm_dev->ib_device,
 494					     &path->sgid,
 495					     sa_conv_pathrec_to_gid_type(path),
 496					     NULL);
 497			if (!IS_ERR(attr)) {
 498				port = cm_dev->port[attr->port_num - 1];
 499				break;
 500			}
 501		}
 502		read_unlock_irqrestore(&cm.device_lock, flags);
 503		if (port)
 504			rdma_put_gid_attr(attr);
 505	}
 506	return port;
 507}
 508
 509static int cm_init_av_by_path(struct sa_path_rec *path,
 510			      const struct ib_gid_attr *sgid_attr,
 511			      struct cm_av *av)
 512{
 513	struct rdma_ah_attr new_ah_attr;
 514	struct cm_device *cm_dev;
 515	struct cm_port *port;
 516	int ret;
 517
 518	port = get_cm_port_from_path(path, sgid_attr);
 519	if (!port)
 520		return -EINVAL;
 521	cm_dev = port->cm_dev;
 522
 523	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
 524				  be16_to_cpu(path->pkey), &av->pkey_index);
 525	if (ret)
 526		return ret;
 527
 528	cm_set_av_port(av, port);
 529
 530	/*
 531	 * av->ah_attr might have been initialized from a wc or during
 532	 * request processing, in which case it may hold a reference to
 533	 * sgid_attr. So initialize a new ah_attr on the stack.
 534	 * If initialization fails, the old ah_attr is kept for sending any
 535	 * responses; if it succeeds, the new ah_attr overwrites the old
 536	 * one. Either way the right ah_attr is available to return an
 537	 * error response.
 538	 */
 539	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
 540					&new_ah_attr, sgid_attr);
 541	if (ret)
 542		return ret;
 543
 544	av->timeout = path->packet_life_time + 1;
 545	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
 546	return 0;
 547}
 548
 549/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
 550static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
 551{
 552	cm_set_av_port(dest, src->port);
 553	cm_set_av_port(src, NULL);
 554	dest->pkey_index = src->pkey_index;
 555	rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
 556	dest->timeout = src->timeout;
 557}
 558
 559static void cm_destroy_av(struct cm_av *av)
 560{
 561	rdma_destroy_ah_attr(&av->ah_attr);
 562	cm_set_av_port(av, NULL);
 563}
 564
 565static u32 cm_local_id(__be32 local_id)
 566{
 567	return (__force u32) (local_id ^ cm.random_id_operand);
 568}
 569
 570static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
 571{
 572	struct cm_id_private *cm_id_priv;
 573
 574	rcu_read_lock();
 575	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
 576	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
 577	    !refcount_inc_not_zero(&cm_id_priv->refcount))
 578		cm_id_priv = NULL;
 579	rcu_read_unlock();
 580
 581	return cm_id_priv;
 582}
 583
 584/*
 585 * Trivial helpers to strip endian annotation and compare; the
 586 * endianness doesn't actually matter since we just need a stable
 587 * order for the RB tree.
 588 */
 589static int be32_lt(__be32 a, __be32 b)
 590{
 591	return (__force u32) a < (__force u32) b;
 592}
 593
 594static int be32_gt(__be32 a, __be32 b)
 595{
 596	return (__force u32) a > (__force u32) b;
 597}
 598
 599static int be64_lt(__be64 a, __be64 b)
 600{
 601	return (__force u64) a < (__force u64) b;
 602}
 603
 604static int be64_gt(__be64 a, __be64 b)
 605{
 606	return (__force u64) a > (__force u64) b;
 607}
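/*
 * Worked example for the comparison helpers above (assuming a
 * little-endian CPU): cpu_to_be32(1) reads back as 0x01000000 and
 * cpu_to_be32(256) as 0x00010000, so be32_lt() disagrees with the numeric
 * order -- but it is the same total order on every lookup, which is all
 * the rb-tree code below requires.
 */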
 608
 609/*
 610 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 611 * if the new ID was inserted, NULL if it could not be inserted due to a
 612 * collision, or the existing cm_id_priv ready for shared usage.
 613 */
 614static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
 615					      ib_cm_handler shared_handler)
 616{
 617	struct rb_node **link = &cm.listen_service_table.rb_node;
 618	struct rb_node *parent = NULL;
 619	struct cm_id_private *cur_cm_id_priv;
 620	__be64 service_id = cm_id_priv->id.service_id;
 621	unsigned long flags;
 622
 623	spin_lock_irqsave(&cm.lock, flags);
 624	while (*link) {
 625		parent = *link;
 626		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 627					  service_node);
 628
 629		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
 630			link = &(*link)->rb_left;
 631		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
 632			link = &(*link)->rb_right;
 633		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
 634			link = &(*link)->rb_left;
 635		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
 636			link = &(*link)->rb_right;
 637		else {
 638			/*
 639			 * Sharing an ib_cm_id with different handlers is not
 640			 * supported
 641			 */
 642			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
 643			    cur_cm_id_priv->id.context ||
 644			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
 645				spin_unlock_irqrestore(&cm.lock, flags);
 646				return NULL;
 647			}
 648			refcount_inc(&cur_cm_id_priv->refcount);
 649			cur_cm_id_priv->listen_sharecount++;
 650			spin_unlock_irqrestore(&cm.lock, flags);
 651			return cur_cm_id_priv;
 652		}
 653	}
 654	cm_id_priv->listen_sharecount++;
 655	rb_link_node(&cm_id_priv->service_node, parent, link);
 656	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
 657	spin_unlock_irqrestore(&cm.lock, flags);
 658	return cm_id_priv;
 659}
 660
 661static struct cm_id_private *cm_find_listen(struct ib_device *device,
 662					    __be64 service_id)
 663{
 664	struct rb_node *node = cm.listen_service_table.rb_node;
 665	struct cm_id_private *cm_id_priv;
 666
 667	while (node) {
 668		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
 669
 670		if (device < cm_id_priv->id.device)
 671			node = node->rb_left;
 672		else if (device > cm_id_priv->id.device)
 673			node = node->rb_right;
 674		else if (be64_lt(service_id, cm_id_priv->id.service_id))
 675			node = node->rb_left;
 676		else if (be64_gt(service_id, cm_id_priv->id.service_id))
 677			node = node->rb_right;
 678		else {
 679			refcount_inc(&cm_id_priv->refcount);
 680			return cm_id_priv;
 681		}
 682	}
 683	return NULL;
 684}
 685
 686static struct cm_timewait_info *
 687cm_insert_remote_id(struct cm_timewait_info *timewait_info)
 688{
 689	struct rb_node **link = &cm.remote_id_table.rb_node;
 690	struct rb_node *parent = NULL;
 691	struct cm_timewait_info *cur_timewait_info;
 692	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
 693	__be32 remote_id = timewait_info->work.remote_id;
 694
 695	while (*link) {
 696		parent = *link;
 697		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 698					     remote_id_node);
 699		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
 700			link = &(*link)->rb_left;
 701		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
 702			link = &(*link)->rb_right;
 703		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 704			link = &(*link)->rb_left;
 705		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 706			link = &(*link)->rb_right;
 707		else
 708			return cur_timewait_info;
 709	}
 710	timewait_info->inserted_remote_id = 1;
 711	rb_link_node(&timewait_info->remote_id_node, parent, link);
 712	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
 713	return NULL;
 714}
 715
 716static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
 717					       __be32 remote_id)
 718{
 719	struct rb_node *node = cm.remote_id_table.rb_node;
 720	struct cm_timewait_info *timewait_info;
 721	struct cm_id_private *res = NULL;
 722
 723	spin_lock_irq(&cm.lock);
 724	while (node) {
 725		timewait_info = rb_entry(node, struct cm_timewait_info,
 726					 remote_id_node);
 727		if (be32_lt(remote_id, timewait_info->work.remote_id))
 728			node = node->rb_left;
 729		else if (be32_gt(remote_id, timewait_info->work.remote_id))
 730			node = node->rb_right;
 731		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
 732			node = node->rb_left;
 733		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
 734			node = node->rb_right;
 735		else {
 736			res = cm_acquire_id(timewait_info->work.local_id,
 737					     timewait_info->work.remote_id);
 738			break;
 739		}
 740	}
 741	spin_unlock_irq(&cm.lock);
 742	return res;
 743}
 744
 745static struct cm_timewait_info *
 746cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
 747{
 748	struct rb_node **link = &cm.remote_qp_table.rb_node;
 749	struct rb_node *parent = NULL;
 750	struct cm_timewait_info *cur_timewait_info;
 751	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
 752	__be32 remote_qpn = timewait_info->remote_qpn;
 753
 754	while (*link) {
 755		parent = *link;
 756		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 757					     remote_qp_node);
 758		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
 759			link = &(*link)->rb_left;
 760		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
 761			link = &(*link)->rb_right;
 762		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 763			link = &(*link)->rb_left;
 764		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 765			link = &(*link)->rb_right;
 766		else
 767			return cur_timewait_info;
 768	}
 769	timewait_info->inserted_remote_qp = 1;
 770	rb_link_node(&timewait_info->remote_qp_node, parent, link);
 771	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
 772	return NULL;
 773}
 774
 775static struct cm_id_private *
 776cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
 777{
 778	struct rb_node **link = &cm.remote_sidr_table.rb_node;
 779	struct rb_node *parent = NULL;
 780	struct cm_id_private *cur_cm_id_priv;
 781	__be32 remote_id = cm_id_priv->id.remote_id;
 782
 783	while (*link) {
 784		parent = *link;
 785		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 786					  sidr_id_node);
 787		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
 788			link = &(*link)->rb_left;
 789		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
 790			link = &(*link)->rb_right;
 791		else {
 792			if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
 793				link = &(*link)->rb_left;
 794			else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
 795				link = &(*link)->rb_right;
 796			else
 797				return cur_cm_id_priv;
 798		}
 799	}
 800	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
 801	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 802	return NULL;
 803}
 804
 805static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 806					      ib_cm_handler cm_handler,
 807					      void *context)
 808{
 809	struct cm_id_private *cm_id_priv;
 810	u32 id;
 811	int ret;
 812
 813	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
 814	if (!cm_id_priv)
 815		return ERR_PTR(-ENOMEM);
 816
 817	cm_id_priv->id.state = IB_CM_IDLE;
 818	cm_id_priv->id.device = device;
 819	cm_id_priv->id.cm_handler = cm_handler;
 820	cm_id_priv->id.context = context;
 821	cm_id_priv->id.remote_cm_qpn = 1;
 822
 823	RB_CLEAR_NODE(&cm_id_priv->service_node);
 824	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 825	spin_lock_init(&cm_id_priv->lock);
 826	init_completion(&cm_id_priv->comp);
 827	INIT_LIST_HEAD(&cm_id_priv->work_list);
 828	atomic_set(&cm_id_priv->work_count, -1);
 829	refcount_set(&cm_id_priv->refcount, 1);
 830
 831	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
 832			      &cm.local_id_next, GFP_KERNEL);
 833	if (ret < 0)
 834		goto error;
 835	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
 836
 837	return cm_id_priv;
 838
 839error:
 840	kfree(cm_id_priv);
 841	return ERR_PTR(ret);
 842}
 843
 844/*
 845 * Make the ID visible to the MAD handlers and other threads that use the
 846 * xarray.
 847 */
 848static void cm_finalize_id(struct cm_id_private *cm_id_priv)
 849{
 850	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
 851		 cm_id_priv, GFP_ATOMIC);
 852}
 853
 854struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 855				 ib_cm_handler cm_handler,
 856				 void *context)
 857{
 858	struct cm_id_private *cm_id_priv;
 859
 860	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
 861	if (IS_ERR(cm_id_priv))
 862		return ERR_CAST(cm_id_priv);
 863
 864	cm_finalize_id(cm_id_priv);
 865	return &cm_id_priv->id;
 866}
 867EXPORT_SYMBOL(ib_create_cm_id);
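/*
 * Hedged usage sketch for ib_create_cm_id(): the caller supplies a
 * callback matching the ib_cm_handler typedef from ib_cm.h and releases
 * the ID with ib_destroy_cm_id(). The handler and function names are
 * hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_cm_handler(struct ib_cm_id *cm_id,
			      const struct ib_cm_event *event)
{
	/* a non-zero return asks the CM to destroy this cm_id */
	return 0;
}

static struct ib_cm_id *example_create(struct ib_device *device)
{
	return ib_create_cm_id(device, example_cm_handler, NULL);
}
#endif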
 868
 869static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
 870{
 871	struct cm_work *work;
 872
 873	if (list_empty(&cm_id_priv->work_list))
 874		return NULL;
 875
 876	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
 877	list_del(&work->list);
 878	return work;
 879}
 880
 881static void cm_free_work(struct cm_work *work)
 882{
 883	if (work->mad_recv_wc)
 884		ib_free_recv_mad(work->mad_recv_wc);
 885	kfree(work);
 886}
 887
 888static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
 889				 struct cm_work *work)
 890	__releases(&cm_id_priv->lock)
 891{
 892	bool immediate;
 893
 894	/*
 895	 * To deliver the event to the user callback we have to drop the
 896	 * spinlock; however, we need to ensure that the user callback is
 897	 * single threaded and receives events in temporal order. If events
 898	 * are already being processed, thread new events onto a list and
 899	 * the thread currently processing will pick them up.
 900	 */
 901	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
 902	if (!immediate) {
 903		list_add_tail(&work->list, &cm_id_priv->work_list);
 904		/*
 905		 * This routine always consumes the incoming reference. Once
 906		 * queued to the work_list, a reference is held by the thread
 907		 * currently running cm_process_work() and this reference is
 908		 * not needed.
 909		 */
 910		cm_deref_id(cm_id_priv);
 911	}
 912	spin_unlock_irq(&cm_id_priv->lock);
 913
 914	if (immediate)
 915		cm_process_work(cm_id_priv, work);
 916}
 917
 918static inline int cm_convert_to_ms(int iba_time)
 919{
 920	/* approximate conversion to ms from 4.096us x 2^iba_time */
 921	return 1 << max(iba_time - 8, 0);
 922}
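/*
 * Worked example for the approximation above: an IBA-encoded time of 14
 * means 4.096us * 2^14 ~= 67ms, and cm_convert_to_ms(14) returns
 * 1 << (14 - 8) = 64ms; i.e. the helper treats 4.096us * 2^8 (~1.05ms)
 * as one millisecond.
 */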
 923
 924/*
 925 * Calculate: 4.096us * 2^ack_timeout = 4.096us * 2^ack_delay + 2 * 4.096us * 2^life_time
 926 * Because of how ack_timeout is stored, adding one doubles the timeout.
 927 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 928 * increment it (round up) only if the other is within 50%.
 929 */
 930static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
 931{
 932	int ack_timeout = packet_life_time + 1;
 933
 934	if (ack_timeout >= ca_ack_delay)
 935		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
 936	else
 937		ack_timeout = ca_ack_delay +
 938			      (ack_timeout >= (ca_ack_delay - 1));
 939
 940	return min(31, ack_timeout);
 941}
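/*
 * Worked example for cm_ack_timeout(): with ca_ack_delay = 4 and
 * packet_life_time = 10, ack_timeout starts at 11 (doubling life_time in
 * the exponent encoding); since 4 < 11 - 1 the round-up increment is not
 * taken and 11 is returned, clamped to the 5-bit maximum of 31.
 */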
 942
 943static void cm_remove_remote(struct cm_id_private *cm_id_priv)
 944{
 945	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
 946
 947	if (timewait_info->inserted_remote_id) {
 948		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
 949		timewait_info->inserted_remote_id = 0;
 950	}
 951
 952	if (timewait_info->inserted_remote_qp) {
 953		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
 954		timewait_info->inserted_remote_qp = 0;
 955	}
 956}
 957
 958static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
 959{
 960	struct cm_timewait_info *timewait_info;
 961
 962	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
 963	if (!timewait_info)
 964		return ERR_PTR(-ENOMEM);
 965
 966	timewait_info->work.local_id = local_id;
 967	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
 968	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
 969	return timewait_info;
 970}
 971
 972static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 973{
 974	int wait_time;
 975	unsigned long flags;
 976	struct cm_device *cm_dev;
 977
 978	lockdep_assert_held(&cm_id_priv->lock);
 979
 980	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
 981	if (!cm_dev)
 982		return;
 983
 984	spin_lock_irqsave(&cm.lock, flags);
 985	cm_remove_remote(cm_id_priv);
 986	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
 987	spin_unlock_irqrestore(&cm.lock, flags);
 988
 989	/*
 990	 * The cm_id could be destroyed by the user before we exit timewait.
 991	 * To protect against this, we search for the cm_id after exiting
 992	 * timewait before notifying the user that we've exited timewait.
 993	 */
 994	cm_id_priv->id.state = IB_CM_TIMEWAIT;
 995	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
 996
 997	/* Check if the device started its remove_one */
 998	spin_lock_irqsave(&cm.lock, flags);
 999	if (!cm_dev->going_down)
1000		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
1001				   msecs_to_jiffies(wait_time));
1002	spin_unlock_irqrestore(&cm.lock, flags);
1003
1004	/*
1005	 * The timewait_info is converted into a work and gets freed during
1006	 * cm_free_work() in cm_timewait_handler().
1007	 */
1008	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
1009	cm_id_priv->timewait_info = NULL;
1010}
1011
1012static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
1013{
1014	unsigned long flags;
1015
1016	lockdep_assert_held(&cm_id_priv->lock);
1017
1018	cm_id_priv->id.state = IB_CM_IDLE;
1019	if (cm_id_priv->timewait_info) {
1020		spin_lock_irqsave(&cm.lock, flags);
1021		cm_remove_remote(cm_id_priv);
1022		spin_unlock_irqrestore(&cm.lock, flags);
1023		kfree(cm_id_priv->timewait_info);
1024		cm_id_priv->timewait_info = NULL;
1025	}
1026}
1027
1028static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1029{
1030	struct cm_id_private *cm_id_priv;
1031	struct cm_work *work;
1032
1033	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1034	spin_lock_irq(&cm_id_priv->lock);
1035retest:
1036	switch (cm_id->state) {
1037	case IB_CM_LISTEN:
1038		spin_lock(&cm.lock);
1039		if (--cm_id_priv->listen_sharecount > 0) {
1040			/* The id is still shared. */
1041			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
1042			spin_unlock(&cm.lock);
1043			spin_unlock_irq(&cm_id_priv->lock);
1044			cm_deref_id(cm_id_priv);
1045			return;
1046		}
1047		cm_id->state = IB_CM_IDLE;
1048		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1049		RB_CLEAR_NODE(&cm_id_priv->service_node);
1050		spin_unlock(&cm.lock);
1051		break;
1052	case IB_CM_SIDR_REQ_SENT:
1053		cm_id->state = IB_CM_IDLE;
1054		ib_cancel_mad(cm_id_priv->msg);
1055		break;
1056	case IB_CM_SIDR_REQ_RCVD:
1057		cm_send_sidr_rep_locked(cm_id_priv,
1058					&(struct ib_cm_sidr_rep_param){
1059						.status = IB_SIDR_REJECT });
1060		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
1061		cm_id->state = IB_CM_IDLE;
1062		break;
1063	case IB_CM_REQ_SENT:
1064	case IB_CM_MRA_REQ_RCVD:
1065		ib_cancel_mad(cm_id_priv->msg);
1066		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
1067				   &cm_id_priv->id.device->node_guid,
1068				   sizeof(cm_id_priv->id.device->node_guid),
1069				   NULL, 0);
1070		break;
1071	case IB_CM_REQ_RCVD:
1072		if (err == -ENOMEM) {
1073			/* Do not reject to allow future retries. */
1074			cm_reset_to_idle(cm_id_priv);
1075		} else {
1076			cm_send_rej_locked(cm_id_priv,
1077					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1078					   NULL, 0);
1079		}
1080		break;
1081	case IB_CM_REP_SENT:
1082	case IB_CM_MRA_REP_RCVD:
1083		ib_cancel_mad(cm_id_priv->msg);
1084		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1085				   0, NULL, 0);
1086		goto retest;
1087	case IB_CM_MRA_REQ_SENT:
1088	case IB_CM_REP_RCVD:
1089	case IB_CM_MRA_REP_SENT:
1090		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1091				   0, NULL, 0);
1092		break;
1093	case IB_CM_ESTABLISHED:
1094		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
1095			cm_id->state = IB_CM_IDLE;
1096			break;
1097		}
1098		cm_send_dreq_locked(cm_id_priv, NULL, 0);
1099		goto retest;
1100	case IB_CM_DREQ_SENT:
1101		ib_cancel_mad(cm_id_priv->msg);
1102		cm_enter_timewait(cm_id_priv);
1103		goto retest;
1104	case IB_CM_DREQ_RCVD:
1105		cm_send_drep_locked(cm_id_priv, NULL, 0);
1106		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
1107		goto retest;
1108	case IB_CM_TIMEWAIT:
1109		/*
1110		 * The cm_acquire_id in cm_timewait_handler will stop working
1111		 * once we do xa_erase below, so just move to idle here for
1112		 * consistency.
1113		 */
1114		cm_id->state = IB_CM_IDLE;
1115		break;
1116	case IB_CM_IDLE:
1117		break;
1118	}
1119	WARN_ON(cm_id->state != IB_CM_IDLE);
1120
1121	spin_lock(&cm.lock);
 1122	/* Required for cleanup paths related to cm_req_handler() */
1123	if (cm_id_priv->timewait_info) {
1124		cm_remove_remote(cm_id_priv);
1125		kfree(cm_id_priv->timewait_info);
1126		cm_id_priv->timewait_info = NULL;
1127	}
1128
1129	WARN_ON(cm_id_priv->listen_sharecount);
1130	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
1131	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1132		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
1133	spin_unlock(&cm.lock);
1134	spin_unlock_irq(&cm_id_priv->lock);
1135
1136	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
1137	cm_deref_id(cm_id_priv);
1138	wait_for_completion(&cm_id_priv->comp);
1139	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1140		cm_free_work(work);
1141
1142	cm_destroy_av(&cm_id_priv->av);
1143	cm_destroy_av(&cm_id_priv->alt_av);
1144	kfree(cm_id_priv->private_data);
1145	kfree_rcu(cm_id_priv, rcu);
1146}
1147
1148void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1149{
1150	cm_destroy_id(cm_id, 0);
1151}
1152EXPORT_SYMBOL(ib_destroy_cm_id);
1153
1154static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
1155{
1156	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1157	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
1158		return -EINVAL;
1159
1160	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1161		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
1162	else
1163		cm_id_priv->id.service_id = service_id;
1164
1165	return 0;
1166}
1167
1168/**
1169 * ib_cm_listen - Initiates listening on the specified service ID for
1170 *   connection and service ID resolution requests.
1171 * @cm_id: Connection identifier associated with the listen request.
1172 * @service_id: Service identifier matched against incoming connection
1173 *   and service ID resolution requests.  The service ID should be specified
1174 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1175 *   assign a service ID to the caller.
1176 */
1177int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
1178{
1179	struct cm_id_private *cm_id_priv =
1180		container_of(cm_id, struct cm_id_private, id);
1181	unsigned long flags;
1182	int ret;
1183
1184	spin_lock_irqsave(&cm_id_priv->lock, flags);
1185	if (cm_id_priv->id.state != IB_CM_IDLE) {
1186		ret = -EINVAL;
1187		goto out;
1188	}
1189
1190	ret = cm_init_listen(cm_id_priv, service_id);
1191	if (ret)
1192		goto out;
1193
1194	if (!cm_insert_listen(cm_id_priv, NULL)) {
1195		ret = -EBUSY;
1196		goto out;
1197	}
1198
1199	cm_id_priv->id.state = IB_CM_LISTEN;
1200	ret = 0;
1201
1202out:
1203	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1204	return ret;
1205}
1206EXPORT_SYMBOL(ib_cm_listen);
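/*
 * Hedged usage sketch for ib_cm_listen(): create an ID, then listen on a
 * fixed service ID in network byte order. The service ID value and the
 * example_cm_handler callback are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
	struct ib_cm_id *cm_id;
	int ret;

	cm_id = ib_create_cm_id(device, example_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL));
	if (ret)
		ib_destroy_cm_id(cm_id);
#endif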
1207
1208/**
1209 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
1210 *			 the given service ID.
1211 *
1212 * If there's an existing ID listening on that same device and service ID,
1213 * return it.
1214 *
1215 * @device: Device associated with the cm_id.  All related communication will
1216 * be associated with the specified device.
1217 * @cm_handler: Callback invoked to notify the user of CM events.
1218 * @service_id: Service identifier matched against incoming connection
1219 *   and service ID resolution requests.  The service ID should be specified
1220 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1221 *   assign a service ID to the caller.
1222 *
1223 * Callers should call ib_destroy_cm_id when done with the listener ID.
1224 */
1225struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1226				     ib_cm_handler cm_handler,
1227				     __be64 service_id)
1228{
1229	struct cm_id_private *listen_id_priv;
1230	struct cm_id_private *cm_id_priv;
1231	int err = 0;
1232
1233	/* Create an ID in advance, since the creation may sleep */
1234	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
1235	if (IS_ERR(cm_id_priv))
1236		return ERR_CAST(cm_id_priv);
1237
1238	err = cm_init_listen(cm_id_priv, service_id);
1239	if (err) {
1240		ib_destroy_cm_id(&cm_id_priv->id);
1241		return ERR_PTR(err);
1242	}
1243
1244	spin_lock_irq(&cm_id_priv->lock);
1245	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
1246	if (listen_id_priv != cm_id_priv) {
1247		spin_unlock_irq(&cm_id_priv->lock);
1248		ib_destroy_cm_id(&cm_id_priv->id);
1249		if (!listen_id_priv)
1250			return ERR_PTR(-EINVAL);
1251		return &listen_id_priv->id;
1252	}
1253	cm_id_priv->id.state = IB_CM_LISTEN;
1254	spin_unlock_irq(&cm_id_priv->lock);
1255
1256	/*
1257	 * A listen ID does not need to be in the xarray since it does not
1258	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
1259	 * and does not enter timewait.
1260	 */
1261
1262	return &cm_id_priv->id;
1263}
1264EXPORT_SYMBOL(ib_cm_insert_listen);
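/*
 * Hedged usage sketch for ib_cm_insert_listen(): unlike ib_cm_listen(),
 * this variant either creates a fresh listener or returns the existing
 * shared one, so the caller simply destroys whatever ID it was handed
 * back. The service ID and handler are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
	struct ib_cm_id *listen_id;

	listen_id = ib_cm_insert_listen(device, example_cm_handler,
					cpu_to_be64(0x1000ULL));
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);
	/* ... accept connections ... */
	ib_destroy_cm_id(listen_id);
#endif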
1265
1266static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
1267{
1268	u64 hi_tid = 0, low_tid;
1269
1270	lockdep_assert_held(&cm_id_priv->lock);
1271
1272	low_tid = (u64)cm_id_priv->id.local_id;
1273	if (!cm_id_priv->av.port)
1274		return cpu_to_be64(low_tid);
1275
1276	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1277	if (cm_id_priv->av.port->mad_agent)
1278		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1279	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1280	return cpu_to_be64(hi_tid | low_tid);
1281}
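/*
 * Worked example for cm_form_tid(): with a MAD agent hi_tid of 0x5 the
 * helper yields cpu_to_be64((0x5ULL << 32) | low_tid), i.e. the agent
 * identifier fills the high 32 bits of the transaction ID and the
 * (already wire-order) local_id the low 32 bits. With no bound port or
 * agent, hi_tid stays 0 and the TID is just the local_id.
 */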
1282
1283static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1284			      __be16 attr_id, __be64 tid)
1285{
1286	hdr->base_version  = IB_MGMT_BASE_VERSION;
1287	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
1288	hdr->class_version = IB_CM_CLASS_VERSION;
1289	hdr->method	   = IB_MGMT_METHOD_SEND;
1290	hdr->attr_id	   = attr_id;
1291	hdr->tid	   = tid;
1292}
1293
1294static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
1295				  __be64 tid, u32 attr_mod)
1296{
1297	cm_format_mad_hdr(hdr, attr_id, tid);
1298	hdr->attr_mod = cpu_to_be32(attr_mod);
1299}
1300
1301static void cm_format_req(struct cm_req_msg *req_msg,
1302			  struct cm_id_private *cm_id_priv,
1303			  struct ib_cm_req_param *param)
1304{
1305	struct sa_path_rec *pri_path = param->primary_path;
1306	struct sa_path_rec *alt_path = param->alternate_path;
1307	bool pri_ext = false;
1308	__be16 lid;
1309
1310	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1311		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1312					      pri_path->opa.slid);
1313
1314	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1315			      cm_form_tid(cm_id_priv), param->ece.attr_mod);
1316
1317	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
1318		be32_to_cpu(cm_id_priv->id.local_id));
1319	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
1320	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
1321		be64_to_cpu(cm_id_priv->id.device->node_guid));
1322	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
1323	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
1324	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
1325		param->remote_cm_response_timeout);
1326	cm_req_set_qp_type(req_msg, param->qp_type);
1327	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
1328	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
1329	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
1330		param->local_cm_response_timeout);
1331	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
1332		be16_to_cpu(param->primary_path->pkey));
1333	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
1334		param->primary_path->mtu);
1335	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
1336
1337	if (param->qp_type != IB_QPT_XRC_INI) {
1338		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
1339			param->responder_resources);
1340		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
1341		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
1342			param->rnr_retry_count);
1343		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
1344	}
1345
1346	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
1347		pri_path->sgid;
1348	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
1349		pri_path->dgid;
1350	if (pri_ext) {
1351		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
1352			->global.interface_id =
1353			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1354		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
1355			->global.interface_id =
1356			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1357	}
1358	if (pri_path->hop_limit <= 1) {
1359		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1360			be16_to_cpu(pri_ext ? 0 :
1361					      htons(ntohl(sa_path_get_slid(
1362						      pri_path)))));
1363		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1364			be16_to_cpu(pri_ext ? 0 :
1365					      htons(ntohl(sa_path_get_dlid(
1366						      pri_path)))));
1367	} else {
1368
1369		if (param->primary_path_inbound) {
1370			lid = param->primary_path_inbound->ib.dlid;
1371			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1372				be16_to_cpu(lid));
1373		} else
1374			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1375				be16_to_cpu(IB_LID_PERMISSIVE));
1376
1377		/* Work-around until there's a way to obtain remote LID info */
1378		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1379			be16_to_cpu(IB_LID_PERMISSIVE));
1380	}
1381	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
1382		be32_to_cpu(pri_path->flow_label));
1383	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
1384	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
1385	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
1386	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
1387	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
1388		(pri_path->hop_limit <= 1));
1389	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
1390		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1391			       pri_path->packet_life_time));
1392
1393	if (alt_path) {
1394		bool alt_ext = false;
1395
1396		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1397			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1398						      alt_path->opa.slid);
1399
1400		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
1401			alt_path->sgid;
1402		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
1403			alt_path->dgid;
1404		if (alt_ext) {
1405			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1406					req_msg)
1407				->global.interface_id =
1408				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1409			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
1410					req_msg)
1411				->global.interface_id =
1412				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1413		}
1414		if (alt_path->hop_limit <= 1) {
1415			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1416				be16_to_cpu(
1417					alt_ext ? 0 :
1418						  htons(ntohl(sa_path_get_slid(
1419							  alt_path)))));
1420			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1421				be16_to_cpu(
1422					alt_ext ? 0 :
1423						  htons(ntohl(sa_path_get_dlid(
1424							  alt_path)))));
1425		} else {
1426			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1427				be16_to_cpu(IB_LID_PERMISSIVE));
1428			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1429				be16_to_cpu(IB_LID_PERMISSIVE));
1430		}
1431		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
1432			be32_to_cpu(alt_path->flow_label));
1433		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
1434		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
1435			alt_path->traffic_class);
1436		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
1437			alt_path->hop_limit);
1438		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
1439		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
1440			(alt_path->hop_limit <= 1));
1441		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
1442			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1443				       alt_path->packet_life_time));
1444	}
1445	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
1446
1447	if (param->private_data && param->private_data_len)
1448		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
1449			    param->private_data_len);
1450}
1451
1452static int cm_validate_req_param(struct ib_cm_req_param *param)
1453{
1454	if (!param->primary_path)
1455		return -EINVAL;
1456
1457	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1458	    param->qp_type != IB_QPT_XRC_INI)
1459		return -EINVAL;
1460
1461	if (param->private_data &&
1462	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1463		return -EINVAL;
1464
1465	if (param->alternate_path &&
1466	    (param->alternate_path->pkey != param->primary_path->pkey ||
1467	     param->alternate_path->mtu != param->primary_path->mtu))
1468		return -EINVAL;
1469
1470	return 0;
1471}
1472
1473int ib_send_cm_req(struct ib_cm_id *cm_id,
1474		   struct ib_cm_req_param *param)
1475{
1476	struct cm_av av = {}, alt_av = {};
1477	struct cm_id_private *cm_id_priv;
1478	struct ib_mad_send_buf *msg;
1479	struct cm_req_msg *req_msg;
1480	unsigned long flags;
1481	int ret;
1482
1483	ret = cm_validate_req_param(param);
1484	if (ret)
1485		return ret;
1486
1487	/* Verify that we're not in timewait. */
1488	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1489	spin_lock_irqsave(&cm_id_priv->lock, flags);
1490	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
1491		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1492		return -EINVAL;
1493	}
1494	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1495
 1496	cm_id_priv->timewait_info =
 1497		cm_create_timewait_info(cm_id_priv->id.local_id);
1498	if (IS_ERR(cm_id_priv->timewait_info)) {
1499		ret = PTR_ERR(cm_id_priv->timewait_info);
1500		cm_id_priv->timewait_info = NULL;
1501		return ret;
1502	}
1503
1504	ret = cm_init_av_by_path(param->primary_path,
1505				 param->ppath_sgid_attr, &av);
1506	if (ret)
1507		return ret;
1508	if (param->alternate_path) {
1509		ret = cm_init_av_by_path(param->alternate_path, NULL,
1510					 &alt_av);
1511		if (ret) {
1512			cm_destroy_av(&av);
1513			return ret;
1514		}
1515	}
1516	cm_id->service_id = param->service_id;
1517	cm_id_priv->timeout_ms = cm_convert_to_ms(
1518				    param->primary_path->packet_life_time) * 2 +
1519				 cm_convert_to_ms(
1520				    param->remote_cm_response_timeout);
1521	cm_id_priv->max_cm_retries = param->max_cm_retries;
1522	cm_id_priv->initiator_depth = param->initiator_depth;
1523	cm_id_priv->responder_resources = param->responder_resources;
1524	cm_id_priv->retry_count = param->retry_count;
1525	cm_id_priv->path_mtu = param->primary_path->mtu;
1526	cm_id_priv->pkey = param->primary_path->pkey;
1527	cm_id_priv->qp_type = param->qp_type;
1528
1529	spin_lock_irqsave(&cm_id_priv->lock, flags);
1530
1531	cm_move_av_from_path(&cm_id_priv->av, &av);
1532	if (param->primary_path_outbound)
1533		cm_id_priv->av.dlid_datapath =
1534			be16_to_cpu(param->primary_path_outbound->ib.dlid);
1535
1536	if (param->alternate_path)
1537		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
1538
1539	msg = cm_alloc_priv_msg(cm_id_priv);
1540	if (IS_ERR(msg)) {
1541		ret = PTR_ERR(msg);
1542		goto out_unlock;
1543	}
1544
1545	req_msg = (struct cm_req_msg *)msg->mad;
1546	cm_format_req(req_msg, cm_id_priv, param);
1547	cm_id_priv->tid = req_msg->hdr.tid;
1548	msg->timeout_ms = cm_id_priv->timeout_ms;
1549	msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
1550
1551	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
1552	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
1553
1554	trace_icm_send_req(&cm_id_priv->id);
1555	ret = ib_post_send_mad(msg, NULL);
1556	if (ret)
1557		goto out_free;
1558	BUG_ON(cm_id->state != IB_CM_IDLE);
1559	cm_id->state = IB_CM_REQ_SENT;
1560	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1561	return 0;
1562out_free:
1563	cm_free_priv_msg(msg);
1564out_unlock:
1565	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1566	return ret;
1567}
1568EXPORT_SYMBOL(ib_send_cm_req);
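/*
 * Hedged usage sketch for ib_send_cm_req(): fill an ib_cm_req_param with
 * at least a primary path and a QP type that cm_validate_req_param()
 * accepts. All field values below are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
	struct ib_cm_req_param param = {
		.primary_path		= &path_rec,	/* from path query */
		.service_id		= cpu_to_be64(0x1000ULL),
		.qp_num			= qp->qp_num,
		.qp_type		= IB_QPT_RC,
		.responder_resources	= 4,
		.initiator_depth	= 4,
		.retry_count		= 7,
		.rnr_retry_count	= 7,
		.max_cm_retries		= 15,
	};
	int ret = ib_send_cm_req(cm_id, &param);
#endif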
1569
1570static int cm_issue_rej(struct cm_port *port,
1571			struct ib_mad_recv_wc *mad_recv_wc,
1572			enum ib_cm_rej_reason reason,
1573			enum cm_msg_response msg_rejected,
1574			void *ari, u8 ari_length)
1575{
1576	struct ib_mad_send_buf *msg = NULL;
1577	struct cm_rej_msg *rej_msg, *rcv_msg;
1578	int ret;
1579
1580	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1581	if (ret)
1582		return ret;
1583
1584	/* We just need common CM header information.  Cast to any message. */
1585	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1586	rej_msg = (struct cm_rej_msg *) msg->mad;
1587
1588	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1589	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1590		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
1591	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1592		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1593	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
1594	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1595
1596	if (ari && ari_length) {
1597		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1598		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1599	}
1600
1601	trace_icm_issue_rej(
1602		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
1603		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1604	ret = ib_post_send_mad(msg, NULL);
1605	if (ret)
1606		cm_free_response_msg(msg);
1607
1608	return ret;
1609}
1610
1611static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1612{
1613	return ((cpu_to_be16(
1614			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1615		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1616					       req_msg))));
1617}
1618
1619static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
1620				 struct sa_path_rec *path, union ib_gid *gid)
1621{
1622	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1623		path->rec_type = SA_PATH_REC_TYPE_OPA;
1624	else
1625		path->rec_type = SA_PATH_REC_TYPE_IB;
1626}
1627
1628static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1629					struct sa_path_rec *primary_path,
1630					struct sa_path_rec *alt_path,
1631					struct ib_wc *wc)
1632{
1633	u32 lid;
1634
1635	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1636		sa_path_set_dlid(primary_path, wc->slid);
1637		sa_path_set_slid(primary_path,
1638				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1639					 req_msg));
1640	} else {
1641		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1642			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
1643		sa_path_set_dlid(primary_path, lid);
1644
1645		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1646			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
1647		sa_path_set_slid(primary_path, lid);
1648	}
1649
1650	if (!cm_req_has_alt_path(req_msg))
1651		return;
1652
1653	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1654		sa_path_set_dlid(alt_path,
1655				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1656					 req_msg));
1657		sa_path_set_slid(alt_path,
1658				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1659					 req_msg));
1660	} else {
1661		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1662			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
1663		sa_path_set_dlid(alt_path, lid);
1664
1665		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1666			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
1667		sa_path_set_slid(alt_path, lid);
1668	}
1669}
1670
1671static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1672				     struct sa_path_rec *primary_path,
1673				     struct sa_path_rec *alt_path,
1674				     struct ib_wc *wc)
1675{
1676	primary_path->dgid =
1677		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1678	primary_path->sgid =
1679		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1680	primary_path->flow_label =
1681		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1682	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1683	primary_path->traffic_class =
1684		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
1685	primary_path->reversible = 1;
1686	primary_path->pkey =
1687		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1688	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
1689	primary_path->mtu_selector = IB_SA_EQ;
1690	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1691	primary_path->rate_selector = IB_SA_EQ;
1692	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
1693	primary_path->packet_life_time_selector = IB_SA_EQ;
1694	primary_path->packet_life_time =
1695		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
1696	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1697	primary_path->service_id =
1698		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1699	if (sa_path_is_roce(primary_path))
1700		primary_path->roce.route_resolved = false;
1701
1702	if (cm_req_has_alt_path(req_msg)) {
1703		alt_path->dgid = *IBA_GET_MEM_PTR(
1704			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1705		alt_path->sgid = *IBA_GET_MEM_PTR(
1706			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1707		alt_path->flow_label = cpu_to_be32(
1708			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1709		alt_path->hop_limit =
1710			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1711		alt_path->traffic_class =
1712			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
1713		alt_path->reversible = 1;
1714		alt_path->pkey =
1715			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1716		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
1717		alt_path->mtu_selector = IB_SA_EQ;
1718		alt_path->mtu =
1719			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1720		alt_path->rate_selector = IB_SA_EQ;
1721		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
1722		alt_path->packet_life_time_selector = IB_SA_EQ;
1723		alt_path->packet_life_time =
1724			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
1725		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1726		alt_path->service_id =
1727			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1728
1729		if (sa_path_is_roce(alt_path))
1730			alt_path->roce.route_resolved = false;
1731	}
1732	cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
1733}
1734
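/*
 * Return the PKey selected by the BTH of the incoming request, looked
 * up from the cached PKey table via the work completion's pkey_index;
 * returns 0 if the lookup fails.
 */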
1735static u16 cm_get_bth_pkey(struct cm_work *work)
1736{
1737	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1738	u32 port_num = work->port->port_num;
1739	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1740	u16 pkey;
1741	int ret;
1742
1743	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1744	if (ret) {
1745		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
1746				     port_num, pkey_index, ret);
1747		return 0;
1748	}
1749
1750	return pkey;
1751}
1752
1753/**
1754 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
1755 * @work: Work completion
1756 * @path: Path record
1757 *
1758 * ULPs (such as IPoIB) do not understand OPA GIDs and will
1759 * reject them as the local_gid will not match the sgid. Therefore,
1760 * change the pathrec's SGID to an IB SGID.
1761 */
1762static void cm_opa_to_ib_sgid(struct cm_work *work,
1763			      struct sa_path_rec *path)
1764{
1765	struct ib_device *dev = work->port->cm_dev->ib_device;
1766	u32 port_num = work->port->port_num;
1767
1768	if (rdma_cap_opa_ah(dev, port_num) &&
1769	    (ib_is_opa_gid(&path->sgid))) {
1770		union ib_gid sgid;
1771
1772		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1773			dev_warn(&dev->dev,
1774				 "Error updating sgid in CM request\n");
1775			return;
1776		}
1777
1778		path->sgid = sgid;
1779	}
1780}
1781
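/*
 * Fill in the REQ_RECEIVED event delivered to the listener.  Note the
 * deliberate swaps: the peer's initiator depth becomes our
 * responder_resources (and vice versa), and the remote/local CM
 * response timeouts are likewise exchanged, because the REQ fields
 * describe the sender's view of the connection.
 */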
1782static void cm_format_req_event(struct cm_work *work,
1783				struct cm_id_private *cm_id_priv,
1784				struct ib_cm_id *listen_id)
1785{
1786	struct cm_req_msg *req_msg;
1787	struct ib_cm_req_event_param *param;
1788
1789	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1790	param = &work->cm_event.param.req_rcvd;
1791	param->listen_id = listen_id;
1792	param->bth_pkey = cm_get_bth_pkey(work);
1793	param->port = cm_id_priv->av.port->port_num;
1794	param->primary_path = &work->path[0];
1795	cm_opa_to_ib_sgid(work, param->primary_path);
1796	if (cm_req_has_alt_path(req_msg)) {
1797		param->alternate_path = &work->path[1];
1798		cm_opa_to_ib_sgid(work, param->alternate_path);
1799	} else {
1800		param->alternate_path = NULL;
1801	}
1802	param->remote_ca_guid =
1803		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1804	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1805	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1806	param->qp_type = cm_req_get_qp_type(req_msg);
1807	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1808	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1809	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
1810	param->local_cm_response_timeout =
1811		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1812	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
1813	param->remote_cm_response_timeout =
1814		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1815	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1816	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1817	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
1818	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1819	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1820	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1821
1822	work->cm_event.private_data =
1823		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
1824}
1825
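/*
 * Deliver the current event to the consumer's handler, then drain any
 * events that were queued on the cm_id in the meantime.  A non-zero
 * handler return value destroys the cm_id.
 */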
1826static void cm_process_work(struct cm_id_private *cm_id_priv,
1827			    struct cm_work *work)
1828{
1829	int ret;
1830
1831	/* We will typically only have the current event to report. */
1832	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1833	cm_free_work(work);
1834
1835	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1836		spin_lock_irq(&cm_id_priv->lock);
1837		work = cm_dequeue_work(cm_id_priv);
1838		spin_unlock_irq(&cm_id_priv->lock);
1839		if (!work)
1840			return;
1841
1842		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1843						&work->cm_event);
1844		cm_free_work(work);
1845	}
1846	cm_deref_id(cm_id_priv);
1847	if (ret)
1848		cm_destroy_id(&cm_id_priv->id, ret);
1849}
1850
1851static void cm_format_mra(struct cm_mra_msg *mra_msg,
1852			  struct cm_id_private *cm_id_priv,
1853			  enum cm_msg_response msg_mraed, u8 service_timeout,
1854			  const void *private_data, u8 private_data_len)
1855{
1856	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1857	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1858	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1859		be32_to_cpu(cm_id_priv->id.local_id));
1860	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1861		be32_to_cpu(cm_id_priv->id.remote_id));
1862	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
1863
1864	if (private_data && private_data_len)
1865		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1866			    private_data_len);
1867}
1868
1869static void cm_format_rej(struct cm_rej_msg *rej_msg,
1870			  struct cm_id_private *cm_id_priv,
1871			  enum ib_cm_rej_reason reason, void *ari,
1872			  u8 ari_length, const void *private_data,
1873			  u8 private_data_len, enum ib_cm_state state)
1874{
1875	lockdep_assert_held(&cm_id_priv->lock);
1876
1877	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1878	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1879		be32_to_cpu(cm_id_priv->id.remote_id));
1880
1881	switch (state) {
1882	case IB_CM_REQ_RCVD:
1883		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
1884		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1885		break;
1886	case IB_CM_MRA_REQ_SENT:
1887		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1888			be32_to_cpu(cm_id_priv->id.local_id));
1889		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1890		break;
1891	case IB_CM_REP_RCVD:
1892	case IB_CM_MRA_REP_SENT:
1893		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1894			be32_to_cpu(cm_id_priv->id.local_id));
1895		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
1896		break;
1897	default:
1898		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1899			be32_to_cpu(cm_id_priv->id.local_id));
1900		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1901			CM_MSG_RESPONSE_OTHER);
1902		break;
1903	}
1904
1905	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1906	if (ari && ari_length) {
1907		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1908		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1909	}
1910
1911	if (private_data && private_data_len)
1912		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1913			    private_data_len);
1914}
1915
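/*
 * A duplicate REQ arrived for an existing connection: re-send the
 * outstanding MRA if the REQ was already MRAed, send a stale-connection
 * REJ if the connection is in timewait, and drop it otherwise.
 */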
1916static void cm_dup_req_handler(struct cm_work *work,
1917			       struct cm_id_private *cm_id_priv)
1918{
1919	struct ib_mad_send_buf *msg = NULL;
1920	int ret;
1921
1922	atomic_long_inc(
1923		&work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
1924
1925	/* Quick state check to discard duplicate REQs. */
1926	spin_lock_irq(&cm_id_priv->lock);
1927	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1928		spin_unlock_irq(&cm_id_priv->lock);
1929		return;
1930	}
1931	spin_unlock_irq(&cm_id_priv->lock);
1932
1933	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1934	if (ret)
1935		return;
1936
1937	spin_lock_irq(&cm_id_priv->lock);
1938	switch (cm_id_priv->id.state) {
1939	case IB_CM_MRA_REQ_SENT:
1940		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1941			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1942			      cm_id_priv->private_data,
1943			      cm_id_priv->private_data_len);
1944		break;
1945	case IB_CM_TIMEWAIT:
1946		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1947			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1948			      IB_CM_TIMEWAIT);
1949		break;
1950	default:
1951		goto unlock;
1952	}
1953	spin_unlock_irq(&cm_id_priv->lock);
1954
1955	trace_icm_send_dup_req(&cm_id_priv->id);
1956	ret = ib_post_send_mad(msg, NULL);
1957	if (ret)
1958		goto free;
1959	return;
1960
1961unlock:	spin_unlock_irq(&cm_id_priv->lock);
1962free:	cm_free_response_msg(msg);
1963}
1964
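/*
 * Match a new REQ against existing CM state: duplicate REQs are
 * detected through the remote-ID tree, stale connections through the
 * remote-QPN tree (the old connection is REJected and DREQed), and
 * finally the listening cm_id for the service ID is looked up.
 * Returns the listener with a reference held, or NULL if the REQ was
 * consumed.
 */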
1965static struct cm_id_private *cm_match_req(struct cm_work *work,
1966					  struct cm_id_private *cm_id_priv)
1967{
1968	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1969	struct cm_timewait_info *timewait_info;
1970	struct cm_req_msg *req_msg;
1971
1972	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1973
1974	/* Check for possible duplicate REQ. */
1975	spin_lock_irq(&cm.lock);
1976	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1977	if (timewait_info) {
1978		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1979					   timewait_info->work.remote_id);
1980		spin_unlock_irq(&cm.lock);
1981		if (cur_cm_id_priv) {
1982			cm_dup_req_handler(work, cur_cm_id_priv);
1983			cm_deref_id(cur_cm_id_priv);
1984		}
1985		return NULL;
1986	}
1987
1988	/* Check for stale connections. */
1989	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1990	if (timewait_info) {
1991		cm_remove_remote(cm_id_priv);
1992		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1993					   timewait_info->work.remote_id);
1994
1995		spin_unlock_irq(&cm.lock);
1996		cm_issue_rej(work->port, work->mad_recv_wc,
1997			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1998			     NULL, 0);
1999		if (cur_cm_id_priv) {
2000			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2001			cm_deref_id(cur_cm_id_priv);
2002		}
2003		return NULL;
2004	}
2005
2006	/* Find the listening cm_id matching the service ID. */
2007	listen_cm_id_priv = cm_find_listen(
2008		cm_id_priv->id.device,
2009		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2010	if (!listen_cm_id_priv) {
2011		cm_remove_remote(cm_id_priv);
2012		spin_unlock_irq(&cm.lock);
2013		cm_issue_rej(work->port, work->mad_recv_wc,
2014			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2015			     NULL, 0);
2016		return NULL;
2017	}
2018	spin_unlock_irq(&cm.lock);
2019	return listen_cm_id_priv;
2020}
2021
2022/*
2023 * Work-around for inter-subnet connections.  If the LIDs are permissive,
2024 * we need to override the LID/SL data in the REQ with the LID information
2025 * in the work completion.
2026 */
2027static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2028{
2029	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2030		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2031					req_msg)) == IB_LID_PERMISSIVE) {
2032			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2033				be16_to_cpu(ib_lid_be16(wc->slid)));
2034			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2035		}
2036
2037		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2038					req_msg)) == IB_LID_PERMISSIVE)
2039			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2040				wc->dlid_path_bits);
2041	}
2042
2043	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2044		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2045					req_msg)) == IB_LID_PERMISSIVE) {
2046			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2047				be16_to_cpu(ib_lid_be16(wc->slid)));
2048			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2049		}
2050
2051		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2052					req_msg)) == IB_LID_PERMISSIVE)
2053			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2054				wc->dlid_path_bits);
2055	}
2056}
2057
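/*
 * Top-level handler for a received REQ: allocate the passive-side
 * cm_id, decode the wire fields, resolve the reply and data-path
 * address vectors, match a listener, and queue a REQ_RECEIVED event.
 */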
2058static int cm_req_handler(struct cm_work *work)
2059{
2060	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2061	struct cm_req_msg *req_msg;
2062	const struct ib_global_route *grh;
2063	const struct ib_gid_attr *gid_attr;
2064	int ret;
2065
2066	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2067
2068	cm_id_priv =
2069		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2070	if (IS_ERR(cm_id_priv))
2071		return PTR_ERR(cm_id_priv);
2072
2073	cm_id_priv->id.remote_id =
2074		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2075	cm_id_priv->id.service_id =
2076		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2077	cm_id_priv->tid = req_msg->hdr.tid;
2078	cm_id_priv->timeout_ms = cm_convert_to_ms(
2079		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2080	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2081	cm_id_priv->remote_qpn =
2082		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2083	cm_id_priv->initiator_depth =
2084		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2085	cm_id_priv->responder_resources =
2086		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2087	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2088	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2089	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2090	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2091	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2092	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2093
2094	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2095				      work->mad_recv_wc->recv_buf.grh,
2096				      &cm_id_priv->av);
2097	if (ret)
2098		goto destroy;
2099	cm_id_priv->timewait_info =
2100		cm_create_timewait_info(cm_id_priv->id.local_id);
2101	if (IS_ERR(cm_id_priv->timewait_info)) {
2102		ret = PTR_ERR(cm_id_priv->timewait_info);
2103		cm_id_priv->timewait_info = NULL;
2104		goto destroy;
2105	}
2106	cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2107	cm_id_priv->timewait_info->remote_ca_guid =
2108		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2109	cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2110
2111	/*
2112	 * Note that the ID pointer is not in the xarray at this point,
2113	 * so this set is only visible to the local thread.
2114	 */
2115	cm_id_priv->id.state = IB_CM_REQ_RCVD;
2116
2117	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2118	if (!listen_cm_id_priv) {
2119		trace_icm_no_listener_err(&cm_id_priv->id);
2120		cm_id_priv->id.state = IB_CM_IDLE;
2121		ret = -EINVAL;
2122		goto destroy;
2123	}
2124
2125	memset(&work->path[0], 0, sizeof(work->path[0]));
2126	if (cm_req_has_alt_path(req_msg))
2127		memset(&work->path[1], 0, sizeof(work->path[1]));
2128	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2129	gid_attr = grh->sgid_attr;
2130
2131	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
2132		work->path[0].rec_type =
2133			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2134	} else {
2135		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2136		cm_path_set_rec_type(
2137			work->port->cm_dev->ib_device, work->port->port_num,
2138			&work->path[0],
2139			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2140					req_msg));
2141	}
2142	if (cm_req_has_alt_path(req_msg))
2143		work->path[1].rec_type = work->path[0].rec_type;
2144	cm_format_paths_from_req(req_msg, &work->path[0],
2145				 &work->path[1], work->mad_recv_wc->wc);
2146	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2147		sa_path_set_dmac(&work->path[0],
2148				 cm_id_priv->av.ah_attr.roce.dmac);
2149	work->path[0].hop_limit = grh->hop_limit;
2150
2151	/* This destroy call is needed to pair with cm_init_av_for_response */
2152	cm_destroy_av(&cm_id_priv->av);
2153	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
2154	if (ret) {
2155		int err;
2156
2157		err = rdma_query_gid(work->port->cm_dev->ib_device,
2158				     work->port->port_num, 0,
2159				     &work->path[0].sgid);
2160		if (err)
2161			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2162				       NULL, 0, NULL, 0);
2163		else
2164			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2165				       &work->path[0].sgid,
2166				       sizeof(work->path[0].sgid),
2167				       NULL, 0);
2168		goto rejected;
2169	}
2170	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
2171		cm_id_priv->av.dlid_datapath =
2172			IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
2173
2174	if (cm_req_has_alt_path(req_msg)) {
2175		ret = cm_init_av_by_path(&work->path[1], NULL,
2176					 &cm_id_priv->alt_av);
2177		if (ret) {
2178			ib_send_cm_rej(&cm_id_priv->id,
2179				       IB_CM_REJ_INVALID_ALT_GID,
2180				       &work->path[0].sgid,
2181				       sizeof(work->path[0].sgid), NULL, 0);
2182			goto rejected;
2183		}
2184	}
2185
2186	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2187	cm_id_priv->id.context = listen_cm_id_priv->id.context;
2188	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2189
2190	/* Now MAD handlers can see the new ID */
2191	spin_lock_irq(&cm_id_priv->lock);
2192	cm_finalize_id(cm_id_priv);
2193
2194	/* Refcount belongs to the event, pairs with cm_process_work() */
2195	refcount_inc(&cm_id_priv->refcount);
2196	cm_queue_work_unlock(cm_id_priv, work);
2197	/*
2198	 * Since this ID was just created and was not made visible to other MAD
2199	 * handlers until the cm_finalize_id() above we know that the
2200	 * cm_process_work() will deliver the event and the listen_cm_id
2201	 * embedded in the event can be derefed here.
2202	 */
2203	cm_deref_id(listen_cm_id_priv);
2204	return 0;
2205
2206rejected:
2207	cm_deref_id(listen_cm_id_priv);
2208destroy:
2209	ib_destroy_cm_id(&cm_id_priv->id);
2210	return ret;
2211}
2212
2213static void cm_format_rep(struct cm_rep_msg *rep_msg,
2214			  struct cm_id_private *cm_id_priv,
2215			  struct ib_cm_rep_param *param)
2216{
2217	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2218			      param->ece.attr_mod);
2219	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2220		be32_to_cpu(cm_id_priv->id.local_id));
2221	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2222		be32_to_cpu(cm_id_priv->id.remote_id));
2223	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2224	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2225		param->responder_resources);
2226	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2227		cm_id_priv->av.port->cm_dev->ack_delay);
2228	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2229	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2230	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2231		be64_to_cpu(cm_id_priv->id.device->node_guid));
2232
2233	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2234		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2235			param->initiator_depth);
2236		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2237			param->flow_control);
2238		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2239		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2240	} else {
2241		IBA_SET(CM_REP_SRQ, rep_msg, 1);
2242		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2243	}
2244
2245	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2246	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2247	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2248
2249	if (param->private_data && param->private_data_len)
2250		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2251			    param->private_data_len);
2252}
2253
2254int ib_send_cm_rep(struct ib_cm_id *cm_id,
2255		   struct ib_cm_rep_param *param)
2256{
2257	struct cm_id_private *cm_id_priv;
2258	struct ib_mad_send_buf *msg;
2259	struct cm_rep_msg *rep_msg;
2260	unsigned long flags;
2261	int ret;
2262
2263	if (param->private_data &&
2264	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2265		return -EINVAL;
2266
2267	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2268	spin_lock_irqsave(&cm_id_priv->lock, flags);
2269	if (cm_id->state != IB_CM_REQ_RCVD &&
2270	    cm_id->state != IB_CM_MRA_REQ_SENT) {
2271		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
2272		ret = -EINVAL;
2273		goto out;
2274	}
2275
2276	msg = cm_alloc_priv_msg(cm_id_priv);
2277	if (IS_ERR(msg)) {
2278		ret = PTR_ERR(msg);
2279		goto out;
2280	}
2281
2282	rep_msg = (struct cm_rep_msg *) msg->mad;
2283	cm_format_rep(rep_msg, cm_id_priv, param);
2284	msg->timeout_ms = cm_id_priv->timeout_ms;
2285	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2286
2287	trace_icm_send_rep(cm_id);
2288	ret = ib_post_send_mad(msg, NULL);
2289	if (ret)
2290		goto out_free;
2291
2292	cm_id->state = IB_CM_REP_SENT;
2293	cm_id_priv->initiator_depth = param->initiator_depth;
2294	cm_id_priv->responder_resources = param->responder_resources;
2295	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2296	WARN_ONCE(param->qp_num & 0xFF000000,
2297		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2298		  param->qp_num);
2299	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2300	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2301	return 0;
2302
2303out_free:
2304	cm_free_priv_msg(msg);
2305out:
2306	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2307	return ret;
2308}
2309EXPORT_SYMBOL(ib_send_cm_rep);
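
/*
 * Illustrative (non-authoritative) passive-side usage sketch; the
 * variable names below are hypothetical, the fields are those consumed
 * by cm_format_rep() above:
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= qp->qp_num,
 *		.starting_psn		= psn,
 *		.responder_resources	= req->initiator_depth,
 *		.initiator_depth	= req->responder_resources,
 *		.rnr_retry_count	= 7,
 *	};
 *	err = ib_send_cm_rep(cm_id, &rep);
 */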
2310
2311static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2312			  struct cm_id_private *cm_id_priv,
2313			  const void *private_data,
2314			  u8 private_data_len)
2315{
2316	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2317	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2318		be32_to_cpu(cm_id_priv->id.local_id));
2319	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2320		be32_to_cpu(cm_id_priv->id.remote_id));
2321
2322	if (private_data && private_data_len)
2323		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2324			    private_data_len);
2325}
2326
2327int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2328		   const void *private_data,
2329		   u8 private_data_len)
2330{
2331	struct cm_id_private *cm_id_priv;
2332	struct ib_mad_send_buf *msg;
2333	unsigned long flags;
2334	void *data;
2335	int ret;
2336
2337	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2338		return -EINVAL;
2339
2340	data = cm_copy_private_data(private_data, private_data_len);
2341	if (IS_ERR(data))
2342		return PTR_ERR(data);
2343
2344	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2345	spin_lock_irqsave(&cm_id_priv->lock, flags);
2346	if (cm_id->state != IB_CM_REP_RCVD &&
2347	    cm_id->state != IB_CM_MRA_REP_SENT) {
2348		trace_icm_send_cm_rtu_err(cm_id);
2349		ret = -EINVAL;
2350		goto error;
2351	}
2352
2353	msg = cm_alloc_msg(cm_id_priv);
2354	if (IS_ERR(msg)) {
2355		ret = PTR_ERR(msg);
2356		goto error;
2357	}
2358
2359	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2360		      private_data, private_data_len);
2361
2362	trace_icm_send_rtu(cm_id);
2363	ret = ib_post_send_mad(msg, NULL);
2364	if (ret) {
2365		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2366		cm_free_msg(msg);
2367		kfree(data);
2368		return ret;
2369	}
2370
2371	cm_id->state = IB_CM_ESTABLISHED;
2372	cm_set_private_data(cm_id_priv, data, private_data_len);
2373	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2374	return 0;
2375
2376error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2377	kfree(data);
2378	return ret;
2379}
2380EXPORT_SYMBOL(ib_send_cm_rtu);
2381
2382static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2383{
2384	struct cm_rep_msg *rep_msg;
2385	struct ib_cm_rep_event_param *param;
2386
2387	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2388	param = &work->cm_event.param.rep_rcvd;
2389	param->remote_ca_guid =
2390		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2391	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2392	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2393	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2394	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2395	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2396	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2397	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2398	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2399	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2400	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2401	param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2402	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2403	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2404	param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2405
2406	work->cm_event.private_data =
2407		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2408}
2409
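/*
 * A duplicate REP arrived: re-send the RTU if the connection is already
 * established, or the outstanding MRA if the REP was already MRAed.
 */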
2410static void cm_dup_rep_handler(struct cm_work *work)
2411{
2412	struct cm_id_private *cm_id_priv;
2413	struct cm_rep_msg *rep_msg;
2414	struct ib_mad_send_buf *msg = NULL;
2415	int ret;
2416
2417	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2418	cm_id_priv = cm_acquire_id(
2419		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2420		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2421	if (!cm_id_priv)
2422		return;
2423
2424	atomic_long_inc(
2425		&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
2426	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2427	if (ret)
2428		goto deref;
2429
2430	spin_lock_irq(&cm_id_priv->lock);
2431	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2432		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2433			      cm_id_priv->private_data,
2434			      cm_id_priv->private_data_len);
2435	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2436		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2437			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2438			      cm_id_priv->private_data,
2439			      cm_id_priv->private_data_len);
2440	else
2441		goto unlock;
2442	spin_unlock_irq(&cm_id_priv->lock);
2443
2444	trace_icm_send_dup_rep(&cm_id_priv->id);
2445	ret = ib_post_send_mad(msg, NULL);
2446	if (ret)
2447		goto free;
2448	goto deref;
2449
2450unlock:	spin_unlock_irq(&cm_id_priv->lock);
2451free:	cm_free_response_msg(msg);
2452deref:	cm_deref_id(cm_id_priv);
2453}
2454
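/*
 * Handle a received REP on the active side: validate the state, detect
 * duplicate REPs and stale connections through the timewait trees, then
 * record the remote QPN/PSN/resources and move to REP_RCVD.
 */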
2455static int cm_rep_handler(struct cm_work *work)
2456{
2457	struct cm_id_private *cm_id_priv;
2458	struct cm_rep_msg *rep_msg;
2459	int ret;
2460	struct cm_id_private *cur_cm_id_priv;
2461	struct cm_timewait_info *timewait_info;
2462
2463	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2464	cm_id_priv = cm_acquire_id(
2465		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2466	if (!cm_id_priv) {
2467		cm_dup_rep_handler(work);
2468		trace_icm_remote_no_priv_err(
2469			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2470		return -EINVAL;
2471	}
2472
2473	cm_format_rep_event(work, cm_id_priv->qp_type);
2474
2475	spin_lock_irq(&cm_id_priv->lock);
2476	switch (cm_id_priv->id.state) {
2477	case IB_CM_REQ_SENT:
2478	case IB_CM_MRA_REQ_RCVD:
2479		break;
2480	default:
2481		ret = -EINVAL;
2482		trace_icm_rep_unknown_err(
2483			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2484			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2485			cm_id_priv->id.state);
2486		spin_unlock_irq(&cm_id_priv->lock);
2487		goto error;
2488	}
2489
2490	cm_id_priv->timewait_info->work.remote_id =
2491		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2492	cm_id_priv->timewait_info->remote_ca_guid =
2493		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2494	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2495
2496	spin_lock(&cm.lock);
2497	/* Check for duplicate REP. */
2498	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2499		spin_unlock(&cm.lock);
2500		spin_unlock_irq(&cm_id_priv->lock);
2501		ret = -EINVAL;
2502		trace_icm_insert_failed_err(
2503			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2504		goto error;
2505	}
2506	/* Check for a stale connection. */
2507	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2508	if (timewait_info) {
2509		cm_remove_remote(cm_id_priv);
2510		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2511					   timewait_info->work.remote_id);
2512
2513		spin_unlock(&cm.lock);
2514		spin_unlock_irq(&cm_id_priv->lock);
2515		cm_issue_rej(work->port, work->mad_recv_wc,
2516			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2517			     NULL, 0);
2518		ret = -EINVAL;
2519		trace_icm_staleconn_err(
2520			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2521			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2522
2523		if (cur_cm_id_priv) {
2524			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2525			cm_deref_id(cur_cm_id_priv);
2526		}
2527
2528		goto error;
2529	}
2530	spin_unlock(&cm.lock);
2531
2532	cm_id_priv->id.state = IB_CM_REP_RCVD;
2533	cm_id_priv->id.remote_id =
2534		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2535	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2536	cm_id_priv->initiator_depth =
2537		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2538	cm_id_priv->responder_resources =
2539		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2540	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2541	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2542	cm_id_priv->target_ack_delay =
2543		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2544	cm_id_priv->av.timeout =
2545			cm_ack_timeout(cm_id_priv->target_ack_delay,
2546				       cm_id_priv->av.timeout - 1);
2547	cm_id_priv->alt_av.timeout =
2548			cm_ack_timeout(cm_id_priv->target_ack_delay,
2549				       cm_id_priv->alt_av.timeout - 1);
2550
2551	ib_cancel_mad(cm_id_priv->msg);
2552	cm_queue_work_unlock(cm_id_priv, work);
2553	return 0;
2554
2555error:
2556	cm_deref_id(cm_id_priv);
2557	return ret;
2558}
2559
2560static int cm_establish_handler(struct cm_work *work)
2561{
2562	struct cm_id_private *cm_id_priv;
2563
2564	/* See comment in cm_establish about lookup. */
2565	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2566	if (!cm_id_priv)
2567		return -EINVAL;
2568
2569	spin_lock_irq(&cm_id_priv->lock);
2570	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2571		spin_unlock_irq(&cm_id_priv->lock);
2572		goto out;
2573	}
2574
2575	ib_cancel_mad(cm_id_priv->msg);
2576	cm_queue_work_unlock(cm_id_priv, work);
2577	return 0;
2578out:
2579	cm_deref_id(cm_id_priv);
2580	return -EINVAL;
2581}
2582
2583static int cm_rtu_handler(struct cm_work *work)
2584{
2585	struct cm_id_private *cm_id_priv;
2586	struct cm_rtu_msg *rtu_msg;
2587
2588	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2589	cm_id_priv = cm_acquire_id(
2590		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2591		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2592	if (!cm_id_priv)
2593		return -EINVAL;
2594
2595	work->cm_event.private_data =
2596		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2597
2598	spin_lock_irq(&cm_id_priv->lock);
2599	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2600	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2601		spin_unlock_irq(&cm_id_priv->lock);
2602		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2603						     [CM_RTU_COUNTER]);
2604		goto out;
2605	}
2606	cm_id_priv->id.state = IB_CM_ESTABLISHED;
2607
2608	ib_cancel_mad(cm_id_priv->msg);
2609	cm_queue_work_unlock(cm_id_priv, work);
2610	return 0;
2611out:
2612	cm_deref_id(cm_id_priv);
2613	return -EINVAL;
2614}
2615
2616static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2617			  struct cm_id_private *cm_id_priv,
2618			  const void *private_data,
2619			  u8 private_data_len)
2620{
2621	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2622			  cm_form_tid(cm_id_priv));
2623	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2624		be32_to_cpu(cm_id_priv->id.local_id));
2625	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2626		be32_to_cpu(cm_id_priv->id.remote_id));
2627	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2628		be32_to_cpu(cm_id_priv->remote_qpn));
2629
2630	if (private_data && private_data_len)
2631		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2632			    private_data_len);
2633}
2634
2635static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2636			       const void *private_data, u8 private_data_len)
2637{
2638	struct ib_mad_send_buf *msg;
2639	int ret;
2640
2641	lockdep_assert_held(&cm_id_priv->lock);
2642
2643	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2644		return -EINVAL;
2645
2646	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2647		trace_icm_dreq_skipped(&cm_id_priv->id);
2648		return -EINVAL;
2649	}
2650
2651	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2652	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2653		ib_cancel_mad(cm_id_priv->msg);
2654
2655	msg = cm_alloc_priv_msg(cm_id_priv);
2656	if (IS_ERR(msg)) {
2657		cm_enter_timewait(cm_id_priv);
2658		return PTR_ERR(msg);
2659	}
2660
2661	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2662		       private_data, private_data_len);
2663	msg->timeout_ms = cm_id_priv->timeout_ms;
2664	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2665
2666	trace_icm_send_dreq(&cm_id_priv->id);
2667	ret = ib_post_send_mad(msg, NULL);
2668	if (ret) {
2669		cm_enter_timewait(cm_id_priv);
2670		cm_free_priv_msg(msg);
2671		return ret;
2672	}
2673
2674	cm_id_priv->id.state = IB_CM_DREQ_SENT;
2675	return 0;
2676}
2677
2678int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2679		    u8 private_data_len)
2680{
2681	struct cm_id_private *cm_id_priv =
2682		container_of(cm_id, struct cm_id_private, id);
2683	unsigned long flags;
2684	int ret;
2685
2686	spin_lock_irqsave(&cm_id_priv->lock, flags);
2687	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2688	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2689	return ret;
2690}
2691EXPORT_SYMBOL(ib_send_cm_dreq);
2692
2693static void cm_format_drep(struct cm_drep_msg *drep_msg,
2694			  struct cm_id_private *cm_id_priv,
2695			  const void *private_data,
2696			  u8 private_data_len)
2697{
2698	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2699	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2700		be32_to_cpu(cm_id_priv->id.local_id));
2701	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2702		be32_to_cpu(cm_id_priv->id.remote_id));
2703
2704	if (private_data && private_data_len)
2705		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2706			    private_data_len);
2707}
2708
2709static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2710			       void *private_data, u8 private_data_len)
2711{
2712	struct ib_mad_send_buf *msg;
2713	int ret;
2714
2715	lockdep_assert_held(&cm_id_priv->lock);
2716
2717	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2718		return -EINVAL;
2719
2720	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2721		trace_icm_send_drep_err(&cm_id_priv->id);
2722		kfree(private_data);
2723		return -EINVAL;
2724	}
2725
2726	cm_set_private_data(cm_id_priv, private_data, private_data_len);
2727	cm_enter_timewait(cm_id_priv);
2728
2729	msg = cm_alloc_msg(cm_id_priv);
2730	if (IS_ERR(msg))
2731		return PTR_ERR(msg);
2732
2733	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2734		       private_data, private_data_len);
2735
2736	trace_icm_send_drep(&cm_id_priv->id);
2737	ret = ib_post_send_mad(msg, NULL);
2738	if (ret) {
2739		cm_free_msg(msg);
2740		return ret;
2741	}
2742	return 0;
2743}
2744
2745int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2746		    u8 private_data_len)
2747{
2748	struct cm_id_private *cm_id_priv =
2749		container_of(cm_id, struct cm_id_private, id);
2750	unsigned long flags;
2751	void *data;
2752	int ret;
2753
2754	data = cm_copy_private_data(private_data, private_data_len);
2755	if (IS_ERR(data))
2756		return PTR_ERR(data);
2757
2758	spin_lock_irqsave(&cm_id_priv->lock, flags);
2759	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2760	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2761	return ret;
2762}
2763EXPORT_SYMBOL(ib_send_cm_drep);
2764
2765static int cm_issue_drep(struct cm_port *port,
2766			 struct ib_mad_recv_wc *mad_recv_wc)
2767{
2768	struct ib_mad_send_buf *msg = NULL;
2769	struct cm_dreq_msg *dreq_msg;
2770	struct cm_drep_msg *drep_msg;
2771	int ret;
2772
2773	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2774	if (ret)
2775		return ret;
2776
2777	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2778	drep_msg = (struct cm_drep_msg *) msg->mad;
2779
2780	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2781	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2782		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2783	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2784		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2785
2786	trace_icm_issue_drep(
2787		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2788		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2789	ret = ib_post_send_mad(msg, NULL);
2790	if (ret)
2791		cm_free_response_msg(msg);
2792
2793	return ret;
2794}
2795
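/*
 * Handle a received DREQ.  If no matching cm_id exists the peer is
 * answered directly with a DREP so it can complete its disconnect;
 * duplicates seen in TIMEWAIT are answered the same way.
 */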
2796static int cm_dreq_handler(struct cm_work *work)
2797{
2798	struct cm_id_private *cm_id_priv;
2799	struct cm_dreq_msg *dreq_msg;
2800	struct ib_mad_send_buf *msg = NULL;
2801
2802	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2803	cm_id_priv = cm_acquire_id(
2804		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2805		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2806	if (!cm_id_priv) {
2807		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2808						     [CM_DREQ_COUNTER]);
2809		cm_issue_drep(work->port, work->mad_recv_wc);
2810		trace_icm_no_priv_err(
2811			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2812			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2813		return -EINVAL;
2814	}
2815
2816	work->cm_event.private_data =
2817		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2818
2819	spin_lock_irq(&cm_id_priv->lock);
2820	if (cm_id_priv->local_qpn !=
2821	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2822		goto unlock;
2823
2824	switch (cm_id_priv->id.state) {
2825	case IB_CM_REP_SENT:
2826	case IB_CM_DREQ_SENT:
2827	case IB_CM_MRA_REP_RCVD:
2828		ib_cancel_mad(cm_id_priv->msg);
2829		break;
2830	case IB_CM_ESTABLISHED:
2831		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2832		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2833			ib_cancel_mad(cm_id_priv->msg);
2834		break;
2835	case IB_CM_TIMEWAIT:
2836		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2837						     [CM_DREQ_COUNTER]);
2838		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2839		if (IS_ERR(msg))
2840			goto unlock;
2841
2842		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2843			       cm_id_priv->private_data,
2844			       cm_id_priv->private_data_len);
2845		spin_unlock_irq(&cm_id_priv->lock);
2846
2847		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2848		    ib_post_send_mad(msg, NULL))
2849			cm_free_response_msg(msg);
2850		goto deref;
2851	case IB_CM_DREQ_RCVD:
2852		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2853						     [CM_DREQ_COUNTER]);
2854		goto unlock;
2855	default:
2856		trace_icm_dreq_unknown_err(&cm_id_priv->id);
2857		goto unlock;
2858	}
2859	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2860	cm_id_priv->tid = dreq_msg->hdr.tid;
2861	cm_queue_work_unlock(cm_id_priv, work);
2862	return 0;
2863
2864unlock:	spin_unlock_irq(&cm_id_priv->lock);
2865deref:	cm_deref_id(cm_id_priv);
2866	return -EINVAL;
2867}
2868
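/*
 * Handle a received DREP: the disconnect handshake is complete and the
 * connection enters timewait.
 */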
2869static int cm_drep_handler(struct cm_work *work)
2870{
2871	struct cm_id_private *cm_id_priv;
2872	struct cm_drep_msg *drep_msg;
2873
2874	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2875	cm_id_priv = cm_acquire_id(
2876		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2877		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2878	if (!cm_id_priv)
2879		return -EINVAL;
2880
2881	work->cm_event.private_data =
2882		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2883
2884	spin_lock_irq(&cm_id_priv->lock);
2885	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2886	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2887		spin_unlock_irq(&cm_id_priv->lock);
2888		goto out;
2889	}
2890	cm_enter_timewait(cm_id_priv);
2891
2892	ib_cancel_mad(cm_id_priv->msg);
2893	cm_queue_work_unlock(cm_id_priv, work);
2894	return 0;
2895out:
2896	cm_deref_id(cm_id_priv);
2897	return -EINVAL;
2898}
2899
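/*
 * Format and post a REJ for the current connection state.  If our own
 * REP is outstanding (REP_SENT/MRA_REP_RCVD) the cm_id enters timewait;
 * in the earlier states it is reset to idle.
 */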
2900static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2901			      enum ib_cm_rej_reason reason, void *ari,
2902			      u8 ari_length, const void *private_data,
2903			      u8 private_data_len)
2904{
2905	enum ib_cm_state state = cm_id_priv->id.state;
2906	struct ib_mad_send_buf *msg;
2907	int ret;
2908
2909	lockdep_assert_held(&cm_id_priv->lock);
2910
2911	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2912	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2913		return -EINVAL;
2914
2915	trace_icm_send_rej(&cm_id_priv->id, reason);
2916
2917	switch (state) {
2918	case IB_CM_REQ_SENT:
2919	case IB_CM_MRA_REQ_RCVD:
2920	case IB_CM_REQ_RCVD:
2921	case IB_CM_MRA_REQ_SENT:
2922	case IB_CM_REP_RCVD:
2923	case IB_CM_MRA_REP_SENT:
2924		cm_reset_to_idle(cm_id_priv);
2925		msg = cm_alloc_msg(cm_id_priv);
2926		if (IS_ERR(msg))
2927			return PTR_ERR(msg);
2928		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2929			      ari, ari_length, private_data, private_data_len,
2930			      state);
2931		break;
2932	case IB_CM_REP_SENT:
2933	case IB_CM_MRA_REP_RCVD:
2934		cm_enter_timewait(cm_id_priv);
2935		msg = cm_alloc_msg(cm_id_priv);
2936		if (IS_ERR(msg))
2937			return PTR_ERR(msg);
2938		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2939			      ari, ari_length, private_data, private_data_len,
2940			      state);
2941		break;
2942	default:
2943		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2944		return -EINVAL;
2945	}
2946
2947	ret = ib_post_send_mad(msg, NULL);
2948	if (ret) {
2949		cm_free_msg(msg);
2950		return ret;
2951	}
2952
2953	return 0;
2954}
2955
2956int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2957		   void *ari, u8 ari_length, const void *private_data,
2958		   u8 private_data_len)
2959{
2960	struct cm_id_private *cm_id_priv =
2961		container_of(cm_id, struct cm_id_private, id);
2962	unsigned long flags;
2963	int ret;
2964
2965	spin_lock_irqsave(&cm_id_priv->lock, flags);
2966	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2967				 private_data, private_data_len);
2968	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2969	return ret;
2970}
2971EXPORT_SYMBOL(ib_send_cm_rej);
2972
2973static void cm_format_rej_event(struct cm_work *work)
2974{
2975	struct cm_rej_msg *rej_msg;
2976	struct ib_cm_rej_event_param *param;
2977
2978	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2979	param = &work->cm_event.param.rej_rcvd;
2980	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2981	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2982	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2983	work->cm_event.private_data =
2984		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2985}
2986
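/*
 * Map a received REJ back to a local cm_id.  A timeout REJ carries a
 * CA GUID in its ARI and is matched through the remote-ID table; a REJ
 * of our REQ cannot yet know our view of the remote ID, so only the
 * local ID is matched in that case.
 */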
2987static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2988{
2989	struct cm_id_private *cm_id_priv;
2990	__be32 remote_id;
2991
2992	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
2993
2994	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
2995		cm_id_priv = cm_find_remote_id(
2996			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
2997			remote_id);
2998	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
2999		   CM_MSG_RESPONSE_REQ)
3000		cm_id_priv = cm_acquire_id(
3001			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3002			0);
3003	else
3004		cm_id_priv = cm_acquire_id(
3005			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3006			remote_id);
3007
3008	return cm_id_priv;
3009}
3010
3011static int cm_rej_handler(struct cm_work *work)
3012{
3013	struct cm_id_private *cm_id_priv;
3014	struct cm_rej_msg *rej_msg;
3015
3016	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3017	cm_id_priv = cm_acquire_rejected_id(rej_msg);
3018	if (!cm_id_priv)
3019		return -EINVAL;
3020
3021	cm_format_rej_event(work);
3022
3023	spin_lock_irq(&cm_id_priv->lock);
3024	switch (cm_id_priv->id.state) {
3025	case IB_CM_REQ_SENT:
3026	case IB_CM_MRA_REQ_RCVD:
3027	case IB_CM_REP_SENT:
3028	case IB_CM_MRA_REP_RCVD:
3029		ib_cancel_mad(cm_id_priv->msg);
3030		fallthrough;
3031	case IB_CM_REQ_RCVD:
3032	case IB_CM_MRA_REQ_SENT:
3033		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3034			cm_enter_timewait(cm_id_priv);
3035		else
3036			cm_reset_to_idle(cm_id_priv);
3037		break;
3038	case IB_CM_DREQ_SENT:
3039		ib_cancel_mad(cm_id_priv->msg);
3040		fallthrough;
3041	case IB_CM_REP_RCVD:
3042	case IB_CM_MRA_REP_SENT:
3043		cm_enter_timewait(cm_id_priv);
3044		break;
3045	case IB_CM_ESTABLISHED:
3046		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3047		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3048			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3049				ib_cancel_mad(cm_id_priv->msg);
3050			cm_enter_timewait(cm_id_priv);
3051			break;
3052		}
3053		fallthrough;
3054	default:
3055		trace_icm_rej_unknown_err(&cm_id_priv->id);
3056		spin_unlock_irq(&cm_id_priv->lock);
3057		goto out;
3058	}
3059
3060	cm_queue_work_unlock(cm_id_priv, work);
3061	return 0;
3062out:
3063	cm_deref_id(cm_id_priv);
3064	return -EINVAL;
3065}
3066
3067int ib_send_cm_mra(struct ib_cm_id *cm_id,
3068		   u8 service_timeout,
3069		   const void *private_data,
3070		   u8 private_data_len)
3071{
3072	struct cm_id_private *cm_id_priv;
3073	struct ib_mad_send_buf *msg;
3074	enum ib_cm_state cm_state;
3075	enum ib_cm_lap_state lap_state;
3076	enum cm_msg_response msg_response;
3077	void *data;
3078	unsigned long flags;
3079	int ret;
3080
3081	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3082		return -EINVAL;
3083
3084	data = cm_copy_private_data(private_data, private_data_len);
3085	if (IS_ERR(data))
3086		return PTR_ERR(data);
3087
3088	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3089
3090	spin_lock_irqsave(&cm_id_priv->lock, flags);
3091	switch (cm_id_priv->id.state) {
3092	case IB_CM_REQ_RCVD:
3093		cm_state = IB_CM_MRA_REQ_SENT;
3094		lap_state = cm_id->lap_state;
3095		msg_response = CM_MSG_RESPONSE_REQ;
3096		break;
3097	case IB_CM_REP_RCVD:
3098		cm_state = IB_CM_MRA_REP_SENT;
3099		lap_state = cm_id->lap_state;
3100		msg_response = CM_MSG_RESPONSE_REP;
3101		break;
3102	case IB_CM_ESTABLISHED:
3103		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3104			cm_state = cm_id->state;
3105			lap_state = IB_CM_MRA_LAP_SENT;
3106			msg_response = CM_MSG_RESPONSE_OTHER;
3107			break;
3108		}
3109		fallthrough;
3110	default:
3111		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
3112		ret = -EINVAL;
3113		goto error_unlock;
3114	}
3115
3116	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3117		msg = cm_alloc_msg(cm_id_priv);
3118		if (IS_ERR(msg)) {
3119			ret = PTR_ERR(msg);
3120			goto error_unlock;
3121		}
3122
3123		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3124			      msg_response, service_timeout,
3125			      private_data, private_data_len);
3126		trace_icm_send_mra(cm_id);
3127		ret = ib_post_send_mad(msg, NULL);
3128		if (ret)
3129			goto error_free_msg;
3130	}
3131
3132	cm_id->state = cm_state;
3133	cm_id->lap_state = lap_state;
3134	cm_id_priv->service_timeout = service_timeout;
3135	cm_set_private_data(cm_id_priv, data, private_data_len);
3136	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3137	return 0;
3138
3139error_free_msg:
3140	cm_free_msg(msg);
3141error_unlock:
3142	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3143	kfree(data);
3144	return ret;
3145}
3146EXPORT_SYMBOL(ib_send_cm_mra);
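
/*
 * Illustrative usage sketch: a listener needing more time before
 * replying to a REQ can acknowledge it with
 *
 *	ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
 *
 * where service_timeout is a hypothetical consumer-chosen encoded
 * timeout; OR-ing in IB_CM_MRA_FLAG_DELAY records the new timeout
 * without posting an MRA on the wire, per the flag check above.
 */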
3147
3148static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3149{
3150	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3151	case CM_MSG_RESPONSE_REQ:
3152		return cm_acquire_id(
3153			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3154			0);
3155	case CM_MSG_RESPONSE_REP:
3156	case CM_MSG_RESPONSE_OTHER:
3157		return cm_acquire_id(
3158			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3159			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3160	default:
3161		return NULL;
3162	}
3163}
3164
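/*
 * Handle a received MRA: extend the timeout of the outstanding REQ, REP
 * or LAP by the peer's requested service timeout and move to the
 * corresponding MRA_..._RCVD state.
 */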
3165static int cm_mra_handler(struct cm_work *work)
3166{
3167	struct cm_id_private *cm_id_priv;
3168	struct cm_mra_msg *mra_msg;
3169	int timeout;
3170
3171	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3172	cm_id_priv = cm_acquire_mraed_id(mra_msg);
3173	if (!cm_id_priv)
3174		return -EINVAL;
3175
3176	work->cm_event.private_data =
3177		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3178	work->cm_event.param.mra_rcvd.service_timeout =
3179		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3180	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3181		  cm_convert_to_ms(cm_id_priv->av.timeout);
3182
3183	spin_lock_irq(&cm_id_priv->lock);
3184	switch (cm_id_priv->id.state) {
3185	case IB_CM_REQ_SENT:
3186		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3187			    CM_MSG_RESPONSE_REQ ||
3188		    ib_modify_mad(cm_id_priv->msg, timeout))
3189			goto out;
3190		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3191		break;
3192	case IB_CM_REP_SENT:
3193		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3194			    CM_MSG_RESPONSE_REP ||
3195		    ib_modify_mad(cm_id_priv->msg, timeout))
3196			goto out;
3197		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3198		break;
3199	case IB_CM_ESTABLISHED:
3200		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3201			    CM_MSG_RESPONSE_OTHER ||
3202		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3203		    ib_modify_mad(cm_id_priv->msg, timeout)) {
3204			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3205				atomic_long_inc(
3206					&work->port->counters[CM_RECV_DUPLICATES]
3207							     [CM_MRA_COUNTER]);
3208			goto out;
3209		}
3210		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3211		break;
3212	case IB_CM_MRA_REQ_RCVD:
3213	case IB_CM_MRA_REP_RCVD:
3214		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3215						     [CM_MRA_COUNTER]);
3216		fallthrough;
3217	default:
3218		trace_icm_mra_unknown_err(&cm_id_priv->id);
3219		goto out;
3220	}
3221
3222	cm_id_priv->msg->context[1] = (void *) (unsigned long)
3223				      cm_id_priv->id.state;
3224	cm_queue_work_unlock(cm_id_priv, work);
3225	return 0;
3226out:
3227	spin_unlock_irq(&cm_id_priv->lock);
3228	cm_deref_id(cm_id_priv);
3229	return -EINVAL;
3230}
3231
3232static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3233					struct sa_path_rec *path)
3234{
3235	u32 lid;
3236
3237	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3238		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3239					       lap_msg));
3240		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3241					       lap_msg));
3242	} else {
3243		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3244			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3245		sa_path_set_dlid(path, lid);
3246
3247		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3248			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3249		sa_path_set_slid(path, lid);
3250	}
3251}
3252
3253static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3254				    struct sa_path_rec *path,
3255				    struct cm_lap_msg *lap_msg)
3256{
3257	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3258	path->sgid =
3259		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3260	path->flow_label =
3261		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3262	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3263	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3264	path->reversible = 1;
3265	path->pkey = cm_id_priv->pkey;
3266	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3267	path->mtu_selector = IB_SA_EQ;
3268	path->mtu = cm_id_priv->path_mtu;
3269	path->rate_selector = IB_SA_EQ;
3270	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3271	path->packet_life_time_selector = IB_SA_EQ;
3272	path->packet_life_time =
3273		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3274	path->packet_life_time -= (path->packet_life_time > 0);
3275	cm_format_path_lid_from_lap(lap_msg, path);
3276}
3277
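/*
 * Handle a received LAP: rebuild the proposed alternate path from the
 * message, refresh the address vectors, and queue a LAP_RECEIVED event.
 * A LAP that was already MRAed is answered by re-sending the MRA.
 */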
3278static int cm_lap_handler(struct cm_work *work)
3279{
3280	struct cm_id_private *cm_id_priv;
3281	struct cm_lap_msg *lap_msg;
3282	struct ib_cm_lap_event_param *param;
3283	struct ib_mad_send_buf *msg = NULL;
3284	struct rdma_ah_attr ah_attr;
3285	struct cm_av alt_av = {};
3286	int ret;
3287
3288	/* Alternate path messages are currently not supported for the
3289	 * RoCE link layer.
3290	 */
3291	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3292			       work->port->port_num))
3293		return -EINVAL;
3294
3295	/* todo: verify LAP request and send reject APR if invalid. */
3296	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3297	cm_id_priv = cm_acquire_id(
3298		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3299		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3300	if (!cm_id_priv)
3301		return -EINVAL;
3302
3303	param = &work->cm_event.param.lap_rcvd;
3304	memset(&work->path[0], 0, sizeof(work->path[0]));
3305	cm_path_set_rec_type(work->port->cm_dev->ib_device,
3306			     work->port->port_num, &work->path[0],
3307			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3308					     lap_msg));
3309	param->alternate_path = &work->path[0];
3310	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3311	work->cm_event.private_data =
3312		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3313
3314	ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
3315				      work->port->port_num,
3316				      work->mad_recv_wc->wc,
3317				      work->mad_recv_wc->recv_buf.grh,
3318				      &ah_attr);
3319	if (ret)
3320		goto deref;
3321
3322	ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
3323	if (ret) {
3324		rdma_destroy_ah_attr(&ah_attr);
3325		goto deref;
3326	}
3327
3328	spin_lock_irq(&cm_id_priv->lock);
3329	cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3330			   &ah_attr, &cm_id_priv->av);
3331	cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
3332
3333	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3334		goto unlock;
3335
3336	switch (cm_id_priv->id.lap_state) {
3337	case IB_CM_LAP_UNINIT:
3338	case IB_CM_LAP_IDLE:
3339		break;
3340	case IB_CM_MRA_LAP_SENT:
3341		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3342						     [CM_LAP_COUNTER]);
3343		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3344		if (IS_ERR(msg))
3345			goto unlock;
3346
3347		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3348			      CM_MSG_RESPONSE_OTHER,
3349			      cm_id_priv->service_timeout,
3350			      cm_id_priv->private_data,
3351			      cm_id_priv->private_data_len);
3352		spin_unlock_irq(&cm_id_priv->lock);
3353
3354		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3355		    ib_post_send_mad(msg, NULL))
3356			cm_free_response_msg(msg);
3357		goto deref;
3358	case IB_CM_LAP_RCVD:
3359		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3360						     [CM_LAP_COUNTER]);
3361		goto unlock;
3362	default:
3363		goto unlock;
3364	}
3365
3366	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3367	cm_id_priv->tid = lap_msg->hdr.tid;
3368	cm_queue_work_unlock(cm_id_priv, work);
3369	return 0;
3370
3371unlock:	spin_unlock_irq(&cm_id_priv->lock);
3372deref:	cm_deref_id(cm_id_priv);
3373	return -EINVAL;
3374}
3375
3376static int cm_apr_handler(struct cm_work *work)
3377{
3378	struct cm_id_private *cm_id_priv;
3379	struct cm_apr_msg *apr_msg;
3380
3381	/* Alternate path messages are currently not supported for the
3382	 * RoCE link layer.
3383	 */
3384	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3385			       work->port->port_num))
3386		return -EINVAL;
3387
3388	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3389	cm_id_priv = cm_acquire_id(
3390		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3391		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3392	if (!cm_id_priv)
3393		return -EINVAL; /* Unmatched reply. */
3394
3395	work->cm_event.param.apr_rcvd.ap_status =
3396		IBA_GET(CM_APR_AR_STATUS, apr_msg);
3397	work->cm_event.param.apr_rcvd.apr_info =
3398		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3399	work->cm_event.param.apr_rcvd.info_len =
3400		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3401	work->cm_event.private_data =
3402		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3403
3404	spin_lock_irq(&cm_id_priv->lock);
3405	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3406	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3407	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3408		spin_unlock_irq(&cm_id_priv->lock);
3409		goto out;
3410	}
3411	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3412	ib_cancel_mad(cm_id_priv->msg);
3413	cm_queue_work_unlock(cm_id_priv, work);
3414	return 0;
3415out:
3416	cm_deref_id(cm_id_priv);
3417	return -EINVAL;
3418}
3419
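	/*
	 * Runs when a connection's timewait period expires: move the ID from
	 * TIMEWAIT back to IDLE and queue the IB_CM_TIMEWAIT_EXIT event for
	 * delivery to the consumer.
	 */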
3420static int cm_timewait_handler(struct cm_work *work)
3421{
3422	struct cm_timewait_info *timewait_info;
3423	struct cm_id_private *cm_id_priv;
3424
3425	timewait_info = container_of(work, struct cm_timewait_info, work);
3426	spin_lock_irq(&cm.lock);
3427	list_del(&timewait_info->list);
3428	spin_unlock_irq(&cm.lock);
3429
3430	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3431				   timewait_info->work.remote_id);
3432	if (!cm_id_priv)
3433		return -EINVAL;
3434
3435	spin_lock_irq(&cm_id_priv->lock);
3436	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3437	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3438		spin_unlock_irq(&cm_id_priv->lock);
3439		goto out;
3440	}
3441	cm_id_priv->id.state = IB_CM_IDLE;
3442	cm_queue_work_unlock(cm_id_priv, work);
3443	return 0;
3444out:
3445	cm_deref_id(cm_id_priv);
3446	return -EINVAL;
3447}
3448
3449static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3450			       struct cm_id_private *cm_id_priv,
3451			       struct ib_cm_sidr_req_param *param)
3452{
3453	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3454			  cm_form_tid(cm_id_priv));
3455	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3456		be32_to_cpu(cm_id_priv->id.local_id));
3457	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3458		be16_to_cpu(param->path->pkey));
3459	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3460		be64_to_cpu(param->service_id));
3461
3462	if (param->private_data && param->private_data_len)
3463		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3464			    param->private_data, param->private_data_len);
3465}
3466
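	/*
	 * Send a service ID resolution request: resolve an address vector for
	 * the given path, build the SIDR REQ MAD, and move the ID to
	 * IB_CM_SIDR_REQ_SENT once the send is posted.
	 */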
3467int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3468			struct ib_cm_sidr_req_param *param)
3469{
3470	struct cm_id_private *cm_id_priv;
3471	struct ib_mad_send_buf *msg;
3472	struct cm_av av = {};
3473	unsigned long flags;
3474	int ret;
3475
3476	if (!param->path || (param->private_data &&
3477	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3478		return -EINVAL;
3479
3480	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3481	ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
3482	if (ret)
3483		return ret;
3484
3485	spin_lock_irqsave(&cm_id_priv->lock, flags);
3486	cm_move_av_from_path(&cm_id_priv->av, &av);
3487	cm_id->service_id = param->service_id;
3488	cm_id_priv->timeout_ms = param->timeout_ms;
3489	cm_id_priv->max_cm_retries = param->max_cm_retries;
3490	if (cm_id->state != IB_CM_IDLE) {
3491		ret = -EINVAL;
3492		goto out_unlock;
3493	}
3494
3495	msg = cm_alloc_priv_msg(cm_id_priv);
3496	if (IS_ERR(msg)) {
3497		ret = PTR_ERR(msg);
3498		goto out_unlock;
3499	}
3500
3501	cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
3502			   param);
3503	msg->timeout_ms = cm_id_priv->timeout_ms;
3504	msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
3505
3506	trace_icm_send_sidr_req(&cm_id_priv->id);
3507	ret = ib_post_send_mad(msg, NULL);
3508	if (ret)
3509		goto out_free;
3510	cm_id->state = IB_CM_SIDR_REQ_SENT;
3511	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3512	return 0;
3513out_free:
3514	cm_free_priv_msg(msg);
3515out_unlock:
3516	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3517	return ret;
3518}
3519EXPORT_SYMBOL(ib_send_cm_sidr_req);
3520
3521static void cm_format_sidr_req_event(struct cm_work *work,
3522				     const struct cm_id_private *rx_cm_id,
3523				     struct ib_cm_id *listen_id)
3524{
3525	struct cm_sidr_req_msg *sidr_req_msg;
3526	struct ib_cm_sidr_req_event_param *param;
3527
3528	sidr_req_msg = (struct cm_sidr_req_msg *)
3529				work->mad_recv_wc->recv_buf.mad;
3530	param = &work->cm_event.param.sidr_req_rcvd;
3531	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3532	param->listen_id = listen_id;
3533	param->service_id =
3534		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3535	param->bth_pkey = cm_get_bth_pkey(work);
3536	param->port = work->port->port_num;
3537	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3538	work->cm_event.private_data =
3539		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3540}
3541
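	/*
	 * Handle a received SIDR REQ: create a new cm_id for the request,
	 * match the service ID against the listen table, and pass the event
	 * to the listener's handler.  Unmatched requests are answered with
	 * IB_SIDR_UNSUPPORTED.
	 */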
3542static int cm_sidr_req_handler(struct cm_work *work)
3543{
3544	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3545	struct cm_sidr_req_msg *sidr_req_msg;
3546	struct ib_wc *wc;
3547	int ret;
3548
3549	cm_id_priv =
3550		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3551	if (IS_ERR(cm_id_priv))
3552		return PTR_ERR(cm_id_priv);
3553
3554	/* Record SGID/SLID and request ID for lookup. */
3555	sidr_req_msg = (struct cm_sidr_req_msg *)
3556				work->mad_recv_wc->recv_buf.mad;
3557
3558	cm_id_priv->id.remote_id =
3559		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3560	cm_id_priv->id.service_id =
3561		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3562	cm_id_priv->tid = sidr_req_msg->hdr.tid;
3563
3564	wc = work->mad_recv_wc->wc;
3565	cm_id_priv->sidr_slid = wc->slid;
3566	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3567				      work->mad_recv_wc->recv_buf.grh,
3568				      &cm_id_priv->av);
3569	if (ret)
3570		goto out;
3571
3572	spin_lock_irq(&cm.lock);
3573	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3574	if (listen_cm_id_priv) {
3575		spin_unlock_irq(&cm.lock);
3576		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3577						     [CM_SIDR_REQ_COUNTER]);
3578		goto out; /* Duplicate message. */
3579	}
3580	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3581	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3582					   cm_id_priv->id.service_id);
3583	if (!listen_cm_id_priv) {
3584		spin_unlock_irq(&cm.lock);
3585		ib_send_cm_sidr_rep(&cm_id_priv->id,
3586				    &(struct ib_cm_sidr_rep_param){
3587					    .status = IB_SIDR_UNSUPPORTED });
3588		goto out; /* No match. */
3589	}
3590	spin_unlock_irq(&cm.lock);
3591
3592	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3593	cm_id_priv->id.context = listen_cm_id_priv->id.context;
3594
3595	/*
3596	 * A SIDR ID does not need to be in the xarray since it does not receive
3597	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3598	 * not enter timewait.
3599	 */
3600
3601	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3602	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3603	cm_free_work(work);
3604	/*
3605	 * A pointer to the listen_cm_id is held in the event, so this deref
3606	 * must be after the event is delivered above.
3607	 */
3608	cm_deref_id(listen_cm_id_priv);
3609	if (ret)
3610		cm_destroy_id(&cm_id_priv->id, ret);
3611	return 0;
3612out:
3613	ib_destroy_cm_id(&cm_id_priv->id);
3614	return -EINVAL;
3615}
3616
3617static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3618			       struct cm_id_private *cm_id_priv,
3619			       struct ib_cm_sidr_rep_param *param)
3620{
3621	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3622			      cm_id_priv->tid, param->ece.attr_mod);
3623	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3624		be32_to_cpu(cm_id_priv->id.remote_id));
3625	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3626	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3627	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3628		be64_to_cpu(cm_id_priv->id.service_id));
3629	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3630	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3631		param->ece.vendor_id & 0xFF);
3632	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3633		(param->ece.vendor_id >> 8) & 0xFF);
3634
3635	if (param->info && param->info_length)
3636		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3637			    param->info, param->info_length);
3638
3639	if (param->private_data && param->private_data_len)
3640		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3641			    param->private_data, param->private_data_len);
3642}
3643
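	/*
	 * Send a SIDR REP for a received SIDR REQ and return the ID to IDLE.
	 * The caller must hold cm_id_priv->lock.
	 */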
3644static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3645				   struct ib_cm_sidr_rep_param *param)
3646{
3647	struct ib_mad_send_buf *msg;
3648	unsigned long flags;
3649	int ret;
3650
3651	lockdep_assert_held(&cm_id_priv->lock);
3652
3653	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3654	    (param->private_data &&
3655	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3656		return -EINVAL;
3657
3658	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3659		return -EINVAL;
3660
3661	msg = cm_alloc_msg(cm_id_priv);
3662	if (IS_ERR(msg))
3663		return PTR_ERR(msg);
3664
3665	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3666			   param);
3667	trace_icm_send_sidr_rep(&cm_id_priv->id);
3668	ret = ib_post_send_mad(msg, NULL);
3669	if (ret) {
3670		cm_free_msg(msg);
3671		return ret;
3672	}
3673	cm_id_priv->id.state = IB_CM_IDLE;
3674	spin_lock_irqsave(&cm.lock, flags);
3675	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3676		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3677		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3678	}
3679	spin_unlock_irqrestore(&cm.lock, flags);
3680	return 0;
3681}
3682
3683int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3684			struct ib_cm_sidr_rep_param *param)
3685{
3686	struct cm_id_private *cm_id_priv =
3687		container_of(cm_id, struct cm_id_private, id);
3688	unsigned long flags;
3689	int ret;
3690
3691	spin_lock_irqsave(&cm_id_priv->lock, flags);
3692	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3693	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3694	return ret;
3695}
3696EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3697
3698static void cm_format_sidr_rep_event(struct cm_work *work,
3699				     const struct cm_id_private *cm_id_priv)
3700{
3701	struct cm_sidr_rep_msg *sidr_rep_msg;
3702	struct ib_cm_sidr_rep_event_param *param;
3703
3704	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3705				work->mad_recv_wc->recv_buf.mad;
3706	param = &work->cm_event.param.sidr_rep_rcvd;
3707	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3708	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3709	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3710	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3711				      sidr_rep_msg);
3712	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3713				  sidr_rep_msg);
3714	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3715	work->cm_event.private_data =
3716		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3717}
3718
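	/*
	 * Handle a received SIDR REP: match it to the outstanding SIDR REQ,
	 * cancel the pending MAD, and report the result to the consumer.
	 */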
3719static int cm_sidr_rep_handler(struct cm_work *work)
3720{
3721	struct cm_sidr_rep_msg *sidr_rep_msg;
3722	struct cm_id_private *cm_id_priv;
3723
3724	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3725				work->mad_recv_wc->recv_buf.mad;
3726	cm_id_priv = cm_acquire_id(
3727		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3728	if (!cm_id_priv)
3729		return -EINVAL; /* Unmatched reply. */
3730
3731	spin_lock_irq(&cm_id_priv->lock);
3732	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3733		spin_unlock_irq(&cm_id_priv->lock);
3734		goto out;
3735	}
3736	cm_id_priv->id.state = IB_CM_IDLE;
3737	ib_cancel_mad(cm_id_priv->msg);
3738	spin_unlock_irq(&cm_id_priv->lock);
3739
3740	cm_format_sidr_rep_event(work, cm_id_priv);
3741	cm_process_work(cm_id_priv, work);
3742	return 0;
3743out:
3744	cm_deref_id(cm_id_priv);
3745	return -EINVAL;
3746}
3747
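	/*
	 * Handle a send completion for a message tied to a cm_id.  Stale,
	 * successful, and flushed sends are simply freed; a real send failure
	 * resets the state machine and reports the matching *_ERROR event to
	 * the consumer.
	 */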
3748static void cm_process_send_error(struct cm_id_private *cm_id_priv,
3749				  struct ib_mad_send_buf *msg,
3750				  enum ib_cm_state state,
3751				  enum ib_wc_status wc_status)
3752{
3753	struct ib_cm_event cm_event = {};
3754	int ret;
3755
3756	/* Discard old sends or ones without a response. */
3757	spin_lock_irq(&cm_id_priv->lock);
3758	if (msg != cm_id_priv->msg) {
3759		spin_unlock_irq(&cm_id_priv->lock);
3760		cm_free_msg(msg);
3761		return;
3762	}
3763	cm_free_priv_msg(msg);
3764
3765	if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
3766	    wc_status == IB_WC_WR_FLUSH_ERR)
3767		goto out_unlock;
3768
3769	trace_icm_mad_send_err(state, wc_status);
3770	switch (state) {
3771	case IB_CM_REQ_SENT:
3772	case IB_CM_MRA_REQ_RCVD:
3773		cm_reset_to_idle(cm_id_priv);
3774		cm_event.event = IB_CM_REQ_ERROR;
3775		break;
3776	case IB_CM_REP_SENT:
3777	case IB_CM_MRA_REP_RCVD:
3778		cm_reset_to_idle(cm_id_priv);
3779		cm_event.event = IB_CM_REP_ERROR;
3780		break;
3781	case IB_CM_DREQ_SENT:
3782		cm_enter_timewait(cm_id_priv);
3783		cm_event.event = IB_CM_DREQ_ERROR;
3784		break;
3785	case IB_CM_SIDR_REQ_SENT:
3786		cm_id_priv->id.state = IB_CM_IDLE;
3787		cm_event.event = IB_CM_SIDR_REQ_ERROR;
3788		break;
3789	default:
3790		goto out_unlock;
3791	}
3792	spin_unlock_irq(&cm_id_priv->lock);
3793	cm_event.param.send_status = wc_status;
3794
3795	/* No other events can occur on the cm_id at this point. */
3796	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3797	if (ret)
3798		ib_destroy_cm_id(&cm_id_priv->id);
3799	return;
3800out_unlock:
3801	spin_unlock_irq(&cm_id_priv->lock);
3802}
3803
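	/*
	 * MAD agent send-completion callback: update the transmit counters,
	 * then route the completion either to cm_process_send_error() or, for
	 * response messages not owned by a cm_id, straight to the free routine.
	 */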
3804static void cm_send_handler(struct ib_mad_agent *mad_agent,
3805			    struct ib_mad_send_wc *mad_send_wc)
3806{
3807	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3808	struct cm_id_private *cm_id_priv = msg->context[0];
3809	enum ib_cm_state state =
3810		(enum ib_cm_state)(unsigned long)msg->context[1];
3811	struct cm_port *port;
3812	u16 attr_index;
3813
3814	port = mad_agent->context;
3815	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3816				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3817
3818	/*
3819	 * If the send was in response to a received message (context[0] is not
3820	 * set to a cm_id), and is not a REJ, then it is a send that was
3821	 * manually retried.
3822	 */
3823	if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
3824		msg->retries = 1;
3825
3826	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
3827	if (msg->retries)
3828		atomic_long_add(msg->retries,
3829				&port->counters[CM_XMIT_RETRIES][attr_index]);
3830
3831	if (cm_id_priv)
3832		cm_process_send_error(cm_id_priv, msg, state,
3833				      mad_send_wc->status);
3834	else
3835		cm_free_response_msg(msg);
3836}
3837
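	/* Workqueue handler that dispatches each queued CM event to its handler. */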
3838static void cm_work_handler(struct work_struct *_work)
3839{
3840	struct cm_work *work = container_of(_work, struct cm_work, work.work);
3841	int ret;
3842
3843	switch (work->cm_event.event) {
3844	case IB_CM_REQ_RECEIVED:
3845		ret = cm_req_handler(work);
3846		break;
3847	case IB_CM_MRA_RECEIVED:
3848		ret = cm_mra_handler(work);
3849		break;
3850	case IB_CM_REJ_RECEIVED:
3851		ret = cm_rej_handler(work);
3852		break;
3853	case IB_CM_REP_RECEIVED:
3854		ret = cm_rep_handler(work);
3855		break;
3856	case IB_CM_RTU_RECEIVED:
3857		ret = cm_rtu_handler(work);
3858		break;
3859	case IB_CM_USER_ESTABLISHED:
3860		ret = cm_establish_handler(work);
3861		break;
3862	case IB_CM_DREQ_RECEIVED:
3863		ret = cm_dreq_handler(work);
3864		break;
3865	case IB_CM_DREP_RECEIVED:
3866		ret = cm_drep_handler(work);
3867		break;
3868	case IB_CM_SIDR_REQ_RECEIVED:
3869		ret = cm_sidr_req_handler(work);
3870		break;
3871	case IB_CM_SIDR_REP_RECEIVED:
3872		ret = cm_sidr_rep_handler(work);
3873		break;
3874	case IB_CM_LAP_RECEIVED:
3875		ret = cm_lap_handler(work);
3876		break;
3877	case IB_CM_APR_RECEIVED:
3878		ret = cm_apr_handler(work);
3879		break;
3880	case IB_CM_TIMEWAIT_EXIT:
3881		ret = cm_timewait_handler(work);
3882		break;
3883	default:
3884		trace_icm_handler_err(work->cm_event.event);
3885		ret = -EINVAL;
3886		break;
3887	}
3888	if (ret)
3889		cm_free_work(work);
3890}
3891
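	/*
	 * Consumer notification that the connection is established (e.g. data
	 * arrived before the RTU): transition REP_SENT/MRA_REP_RCVD to
	 * ESTABLISHED and queue an IB_CM_USER_ESTABLISHED event.
	 */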
3892static int cm_establish(struct ib_cm_id *cm_id)
3893{
3894	struct cm_id_private *cm_id_priv;
3895	struct cm_work *work;
3896	unsigned long flags;
3897	int ret = 0;
3898	struct cm_device *cm_dev;
3899
3900	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3901	if (!cm_dev)
3902		return -ENODEV;
3903
3904	work = kmalloc(sizeof *work, GFP_ATOMIC);
3905	if (!work)
3906		return -ENOMEM;
3907
3908	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3909	spin_lock_irqsave(&cm_id_priv->lock, flags);
3910	switch (cm_id->state) {
3911	case IB_CM_REP_SENT:
3912	case IB_CM_MRA_REP_RCVD:
3913		cm_id->state = IB_CM_ESTABLISHED;
3914		break;
3915	case IB_CM_ESTABLISHED:
3916		ret = -EISCONN;
3917		break;
3918	default:
3919		trace_icm_establish_err(cm_id);
3920		ret = -EINVAL;
3921		break;
3922	}
3923	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3924
3925	if (ret) {
3926		kfree(work);
3927		goto out;
3928	}
3929
3930	/*
3931	 * The CM worker thread may try to destroy the cm_id before it
3932	 * can execute this work item.  To prevent potential deadlock,
3933	 * we need to find the cm_id once we're in the context of the
3934	 * worker thread, rather than holding a reference on it.
3935	 */
3936	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3937	work->local_id = cm_id->local_id;
3938	work->remote_id = cm_id->remote_id;
3939	work->mad_recv_wc = NULL;
3940	work->cm_event.event = IB_CM_USER_ESTABLISHED;
3941
3942	/* Check if the device started its remove_one */
3943	spin_lock_irqsave(&cm.lock, flags);
3944	if (!cm_dev->going_down) {
3945		queue_delayed_work(cm.wq, &work->work, 0);
3946	} else {
3947		kfree(work);
3948		ret = -ENODEV;
3949	}
3950	spin_unlock_irqrestore(&cm.lock, flags);
3951
3952out:
3953	return ret;
3954}
3955
3956static int cm_migrate(struct ib_cm_id *cm_id)
3957{
3958	struct cm_id_private *cm_id_priv;
3959	unsigned long flags;
3960	int ret = 0;
3961
3962	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3963	spin_lock_irqsave(&cm_id_priv->lock, flags);
3964	if (cm_id->state == IB_CM_ESTABLISHED &&
3965	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3966	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
3967		cm_id->lap_state = IB_CM_LAP_IDLE;
3968		cm_id_priv->av = cm_id_priv->alt_av;
3969	} else
3970		ret = -EINVAL;
3971	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3972
3973	return ret;
3974}
3975
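	/*
	 * Forward a QP event to the CM so that its state tracks the QP:
	 * IB_EVENT_COMM_EST establishes the connection, IB_EVENT_PATH_MIG
	 * makes the alternate path the primary one.
	 */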
3976int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3977{
3978	int ret;
3979
3980	switch (event) {
3981	case IB_EVENT_COMM_EST:
3982		ret = cm_establish(cm_id);
3983		break;
3984	case IB_EVENT_PATH_MIG:
3985		ret = cm_migrate(cm_id);
3986		break;
3987	default:
3988		ret = -EINVAL;
3989	}
3990	return ret;
3991}
3992EXPORT_SYMBOL(ib_cm_notify);
3993
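	/*
	 * MAD agent receive callback: translate the attribute ID into a CM
	 * event, bump the receive counter, and queue a work item so the MAD
	 * is processed in workqueue context.
	 */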
3994static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3995			    struct ib_mad_send_buf *send_buf,
3996			    struct ib_mad_recv_wc *mad_recv_wc)
3997{
3998	struct cm_port *port = mad_agent->context;
3999	struct cm_work *work;
4000	enum ib_cm_event_type event;
4001	bool alt_path = false;
4002	u16 attr_id;
4003	int paths = 0;
4004	int going_down = 0;
4005
4006	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4007	case CM_REQ_ATTR_ID:
4008		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4009						mad_recv_wc->recv_buf.mad);
4010		paths = 1 + (alt_path != 0);
4011		event = IB_CM_REQ_RECEIVED;
4012		break;
4013	case CM_MRA_ATTR_ID:
4014		event = IB_CM_MRA_RECEIVED;
4015		break;
4016	case CM_REJ_ATTR_ID:
4017		event = IB_CM_REJ_RECEIVED;
4018		break;
4019	case CM_REP_ATTR_ID:
4020		event = IB_CM_REP_RECEIVED;
4021		break;
4022	case CM_RTU_ATTR_ID:
4023		event = IB_CM_RTU_RECEIVED;
4024		break;
4025	case CM_DREQ_ATTR_ID:
4026		event = IB_CM_DREQ_RECEIVED;
4027		break;
4028	case CM_DREP_ATTR_ID:
4029		event = IB_CM_DREP_RECEIVED;
4030		break;
4031	case CM_SIDR_REQ_ATTR_ID:
4032		event = IB_CM_SIDR_REQ_RECEIVED;
4033		break;
4034	case CM_SIDR_REP_ATTR_ID:
4035		event = IB_CM_SIDR_REP_RECEIVED;
4036		break;
4037	case CM_LAP_ATTR_ID:
4038		paths = 1;
4039		event = IB_CM_LAP_RECEIVED;
4040		break;
4041	case CM_APR_ATTR_ID:
4042		event = IB_CM_APR_RECEIVED;
4043		break;
4044	default:
4045		ib_free_recv_mad(mad_recv_wc);
4046		return;
4047	}
4048
4049	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4050	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
4051
4052	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4053	if (!work) {
4054		ib_free_recv_mad(mad_recv_wc);
4055		return;
4056	}
4057
4058	INIT_DELAYED_WORK(&work->work, cm_work_handler);
4059	work->cm_event.event = event;
4060	work->mad_recv_wc = mad_recv_wc;
4061	work->port = port;
4062
4063	/* Check if the device started its remove_one */
4064	spin_lock_irq(&cm.lock);
4065	if (!port->cm_dev->going_down)
4066		queue_delayed_work(cm.wq, &work->work, 0);
4067	else
4068		going_down = 1;
4069	spin_unlock_irq(&cm.lock);
4070
4071	if (going_down) {
4072		kfree(work);
4073		ib_free_recv_mad(mad_recv_wc);
4074	}
4075}
4076
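	/* Fill the QP attributes needed for the RESET->INIT transition. */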
4077static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4078				struct ib_qp_attr *qp_attr,
4079				int *qp_attr_mask)
4080{
4081	unsigned long flags;
4082	int ret;
4083
4084	spin_lock_irqsave(&cm_id_priv->lock, flags);
4085	switch (cm_id_priv->id.state) {
4086	case IB_CM_REQ_SENT:
4087	case IB_CM_MRA_REQ_RCVD:
4088	case IB_CM_REQ_RCVD:
4089	case IB_CM_MRA_REQ_SENT:
4090	case IB_CM_REP_RCVD:
4091	case IB_CM_MRA_REP_SENT:
4092	case IB_CM_REP_SENT:
4093	case IB_CM_MRA_REP_RCVD:
4094	case IB_CM_ESTABLISHED:
4095		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4096				IB_QP_PKEY_INDEX | IB_QP_PORT;
4097		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4098		if (cm_id_priv->responder_resources) {
4099			struct ib_device *ib_dev = cm_id_priv->id.device;
4100			u64 support_flush = ib_dev->attrs.device_cap_flags &
4101			  (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
4102			u32 flushable = support_flush ?
4103					(IB_ACCESS_FLUSH_GLOBAL |
4104					 IB_ACCESS_FLUSH_PERSISTENT) : 0;
4105
4106			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4107						    IB_ACCESS_REMOTE_ATOMIC |
4108						    flushable;
4109		}
4110		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4111		if (cm_id_priv->av.port)
4112			qp_attr->port_num = cm_id_priv->av.port->port_num;
4113		ret = 0;
4114		break;
4115	default:
4116		trace_icm_qp_init_err(&cm_id_priv->id);
4117		ret = -EINVAL;
4118		break;
4119	}
4120	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4121	return ret;
4122}
4123
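	/* Fill the QP attributes needed for the INIT->RTR transition. */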
4124static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4125			       struct ib_qp_attr *qp_attr,
4126			       int *qp_attr_mask)
4127{
4128	unsigned long flags;
4129	int ret;
4130
4131	spin_lock_irqsave(&cm_id_priv->lock, flags);
4132	switch (cm_id_priv->id.state) {
4133	case IB_CM_REQ_RCVD:
4134	case IB_CM_MRA_REQ_SENT:
4135	case IB_CM_REP_RCVD:
4136	case IB_CM_MRA_REP_SENT:
4137	case IB_CM_REP_SENT:
4138	case IB_CM_MRA_REP_RCVD:
4139	case IB_CM_ESTABLISHED:
4140		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4141				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4142		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4143		if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
4144		    cm_id_priv->av.dlid_datapath &&
4145		    (cm_id_priv->av.dlid_datapath != 0xffff))
4146			qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
4147		qp_attr->path_mtu = cm_id_priv->path_mtu;
4148		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4149		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4150		if (cm_id_priv->qp_type == IB_QPT_RC ||
4151		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4152			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4153					 IB_QP_MIN_RNR_TIMER;
4154			qp_attr->max_dest_rd_atomic =
4155					cm_id_priv->responder_resources;
4156			qp_attr->min_rnr_timer = 0;
4157		}
4158		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
4159		    cm_id_priv->alt_av.port) {
4160			*qp_attr_mask |= IB_QP_ALT_PATH;
4161			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4162			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4163			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4164			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4165		}
4166		ret = 0;
4167		break;
4168	default:
4169		trace_icm_qp_rtr_err(&cm_id_priv->id);
4170		ret = -EINVAL;
4171		break;
4172	}
4173	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4174	return ret;
4175}
4176
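	/*
	 * Fill the QP attributes needed for the RTR->RTS transition, or for
	 * rearming path migration once an alternate path has been loaded.
	 */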
4177static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4178			       struct ib_qp_attr *qp_attr,
4179			       int *qp_attr_mask)
4180{
4181	unsigned long flags;
4182	int ret;
4183
4184	spin_lock_irqsave(&cm_id_priv->lock, flags);
4185	switch (cm_id_priv->id.state) {
4186	/* Allow transition to RTS before sending REP */
4187	case IB_CM_REQ_RCVD:
4188	case IB_CM_MRA_REQ_SENT:
4189
4190	case IB_CM_REP_RCVD:
4191	case IB_CM_MRA_REP_SENT:
4192	case IB_CM_REP_SENT:
4193	case IB_CM_MRA_REP_RCVD:
4194	case IB_CM_ESTABLISHED:
4195		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4196			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4197			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4198			switch (cm_id_priv->qp_type) {
4199			case IB_QPT_RC:
4200			case IB_QPT_XRC_INI:
4201				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4202						 IB_QP_MAX_QP_RD_ATOMIC;
4203				qp_attr->retry_cnt = cm_id_priv->retry_count;
4204				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4205				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4206				fallthrough;
4207			case IB_QPT_XRC_TGT:
4208				*qp_attr_mask |= IB_QP_TIMEOUT;
4209				qp_attr->timeout = cm_id_priv->av.timeout;
4210				break;
4211			default:
4212				break;
4213			}
4214			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4215				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4216				qp_attr->path_mig_state = IB_MIG_REARM;
4217			}
4218		} else {
4219			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4220			if (cm_id_priv->alt_av.port)
4221				qp_attr->alt_port_num =
4222					cm_id_priv->alt_av.port->port_num;
4223			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4224			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4225			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4226			qp_attr->path_mig_state = IB_MIG_REARM;
4227		}
4228		ret = 0;
4229		break;
4230	default:
4231		trace_icm_qp_rts_err(&cm_id_priv->id);
4232		ret = -EINVAL;
4233		break;
4234	}
4235	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4236	return ret;
4237}
4238
4239int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4240		       struct ib_qp_attr *qp_attr,
4241		       int *qp_attr_mask)
4242{
4243	struct cm_id_private *cm_id_priv;
4244	int ret;
4245
4246	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4247	switch (qp_attr->qp_state) {
4248	case IB_QPS_INIT:
4249		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4250		break;
4251	case IB_QPS_RTR:
4252		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4253		break;
4254	case IB_QPS_RTS:
4255		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4256		break;
4257	default:
4258		ret = -EINVAL;
4259		break;
4260	}
4261	return ret;
4262}
4263EXPORT_SYMBOL(ib_cm_init_qp_attr);
4264
4265static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
4266			       struct ib_port_attribute *attr, char *buf)
4267{
4268	struct cm_counter_attribute *cm_attr =
4269		container_of(attr, struct cm_counter_attribute, attr);
4270	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);
4271
4272	if (WARN_ON(!cm_dev))
4273		return -EINVAL;
4274
4275	return sysfs_emit(
4276		buf, "%ld\n",
4277		atomic_long_read(
4278			&cm_dev->port[port_num - 1]
4279				 ->counters[cm_attr->group][cm_attr->index]));
4280}
4281
4282#define CM_COUNTER_ATTR(_name, _group, _index)                                 \
4283	{                                                                      \
4284		.attr = __ATTR(_name, 0444, cm_show_counter, NULL),            \
4285		.group = _group, .index = _index                               \
4286	}
4287
4288#define CM_COUNTER_GROUP(_group, _name)                                        \
4289	static struct cm_counter_attribute cm_counter_attr_##_group[] = {      \
4290		CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER),                  \
4291		CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER),                  \
4292		CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER),                  \
4293		CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER),                  \
4294		CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER),                  \
4295		CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER),                \
4296		CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER),                \
4297		CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER),        \
4298		CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER),        \
4299		CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER),                  \
4300		CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER),                  \
4301	};                                                                     \
4302	static struct attribute *cm_counter_attrs_##_group[] = {               \
4303		&cm_counter_attr_##_group[0].attr.attr,                        \
4304		&cm_counter_attr_##_group[1].attr.attr,                        \
4305		&cm_counter_attr_##_group[2].attr.attr,                        \
4306		&cm_counter_attr_##_group[3].attr.attr,                        \
4307		&cm_counter_attr_##_group[4].attr.attr,                        \
4308		&cm_counter_attr_##_group[5].attr.attr,                        \
4309		&cm_counter_attr_##_group[6].attr.attr,                        \
4310		&cm_counter_attr_##_group[7].attr.attr,                        \
4311		&cm_counter_attr_##_group[8].attr.attr,                        \
4312		&cm_counter_attr_##_group[9].attr.attr,                        \
4313		&cm_counter_attr_##_group[10].attr.attr,                       \
4314		NULL,                                                          \
4315	};                                                                     \
4316	static const struct attribute_group cm_counter_group_##_group = {      \
4317		.name = _name,                                                 \
4318		.attrs = cm_counter_attrs_##_group,                            \
4319	};
4320
4321CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
4322CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
4323CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
4324CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
4325
4326static const struct attribute_group *cm_counter_groups[] = {
4327	&cm_counter_group_CM_XMIT,
4328	&cm_counter_group_CM_XMIT_RETRIES,
4329	&cm_counter_group_CM_RECV,
4330	&cm_counter_group_CM_RECV_DUPLICATES,
4331	NULL,
4332};
4333
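	/*
	 * IB client add callback: for every CM-capable port, register the
	 * counter sysfs groups and a GSI MAD agent, and advertise CM support
	 * in the port capability mask.
	 */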
4334static int cm_add_one(struct ib_device *ib_device)
4335{
4336	struct cm_device *cm_dev;
4337	struct cm_port *port;
4338	struct ib_mad_reg_req reg_req = {
4339		.mgmt_class = IB_MGMT_CLASS_CM,
4340		.mgmt_class_version = IB_CM_CLASS_VERSION,
4341	};
4342	struct ib_port_modify port_modify = {
4343		.set_port_cap_mask = IB_PORT_CM_SUP
4344	};
4345	unsigned long flags;
4346	int ret;
4347	int count = 0;
4348	u32 i;
4349
4350	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4351			 GFP_KERNEL);
4352	if (!cm_dev)
4353		return -ENOMEM;
4354
4355	kref_init(&cm_dev->kref);
4356	spin_lock_init(&cm_dev->mad_agent_lock);
4357	cm_dev->ib_device = ib_device;
4358	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4359	cm_dev->going_down = 0;
4360
4361	ib_set_client_data(ib_device, &cm_client, cm_dev);
4362
4363	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4364	rdma_for_each_port (ib_device, i) {
4365		if (!rdma_cap_ib_cm(ib_device, i))
4366			continue;
4367
4368		port = kzalloc(sizeof *port, GFP_KERNEL);
4369		if (!port) {
4370			ret = -ENOMEM;
4371			goto error1;
4372		}
4373
4374		cm_dev->port[i-1] = port;
4375		port->cm_dev = cm_dev;
4376		port->port_num = i;
4377
4378		ret = ib_port_register_client_groups(ib_device, i,
4379						     cm_counter_groups);
4380		if (ret)
4381			goto error1;
4382
4383		port->mad_agent = ib_register_mad_agent(ib_device, i,
4384							IB_QPT_GSI,
4385							&reg_req,
4386							0,
4387							cm_send_handler,
4388							cm_recv_handler,
4389							port,
4390							0);
4391		if (IS_ERR(port->mad_agent)) {
4392			ret = PTR_ERR(port->mad_agent);
4393			goto error2;
4394		}
4395
4396		ret = ib_modify_port(ib_device, i, 0, &port_modify);
4397		if (ret)
4398			goto error3;
4399
4400		count++;
4401	}
4402
4403	if (!count) {
4404		ret = -EOPNOTSUPP;
4405		goto free;
4406	}
4407
4408	write_lock_irqsave(&cm.device_lock, flags);
4409	list_add_tail(&cm_dev->list, &cm.device_list);
4410	write_unlock_irqrestore(&cm.device_lock, flags);
4411	return 0;
4412
4413error3:
4414	ib_unregister_mad_agent(port->mad_agent);
4415error2:
4416	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
4417error1:
4418	port_modify.set_port_cap_mask = 0;
4419	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4420	while (--i) {
4421		if (!rdma_cap_ib_cm(ib_device, i))
4422			continue;
4423
4424		port = cm_dev->port[i-1];
4425		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4426		ib_unregister_mad_agent(port->mad_agent);
4427		ib_port_unregister_client_groups(ib_device, i,
4428						 cm_counter_groups);
4429	}
4430free:
4431	cm_device_put(cm_dev);
4432	return ret;
4433}
4434
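	/*
	 * IB client remove callback: mark the device as going down, drain the
	 * workqueue so no handler can race with us, then tear down the MAD
	 * agents and sysfs groups for each port.
	 */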
4435static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4436{
4437	struct cm_device *cm_dev = client_data;
4438	struct cm_port *port;
4439	struct ib_port_modify port_modify = {
4440		.clr_port_cap_mask = IB_PORT_CM_SUP
4441	};
4442	unsigned long flags;
4443	u32 i;
4444
4445	write_lock_irqsave(&cm.device_lock, flags);
4446	list_del(&cm_dev->list);
4447	write_unlock_irqrestore(&cm.device_lock, flags);
4448
4449	spin_lock_irq(&cm.lock);
4450	cm_dev->going_down = 1;
4451	spin_unlock_irq(&cm.lock);
4452
4453	rdma_for_each_port (ib_device, i) {
4454		struct ib_mad_agent *mad_agent;
4455
4456		if (!rdma_cap_ib_cm(ib_device, i))
4457			continue;
4458
4459		port = cm_dev->port[i-1];
4460		mad_agent = port->mad_agent;
4461		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4462		/*
4463		 * Flush the queue here, after going_down has been set.  This
4464		 * guarantees that no new work will be queued by the recv
4465		 * handler, after which it is safe to unregister the MAD agent.
4466		 */
4467		flush_workqueue(cm.wq);
4468		/*
4469		 * The above ensures no call paths from the work are running,
4470		 * the remaining paths all take the mad_agent_lock.
4471		 */
4472		spin_lock(&cm_dev->mad_agent_lock);
4473		port->mad_agent = NULL;
4474		spin_unlock(&cm_dev->mad_agent_lock);
4475		ib_unregister_mad_agent(mad_agent);
4476		ib_port_unregister_client_groups(ib_device, i,
4477						 cm_counter_groups);
4478	}
4479
4480	cm_device_put(cm_dev);
4481}
4482
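	/* Module init: set up the global CM tables and register the IB client. */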
4483static int __init ib_cm_init(void)
4484{
4485	int ret;
4486
4487	INIT_LIST_HEAD(&cm.device_list);
4488	rwlock_init(&cm.device_lock);
4489	spin_lock_init(&cm.lock);
4490	cm.listen_service_table = RB_ROOT;
4491	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4492	cm.remote_id_table = RB_ROOT;
4493	cm.remote_qp_table = RB_ROOT;
4494	cm.remote_sidr_table = RB_ROOT;
4495	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
4496	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4497	INIT_LIST_HEAD(&cm.timewait_list);
4498
4499	cm.wq = alloc_workqueue("ib_cm", 0, 1);
4500	if (!cm.wq) {
4501		ret = -ENOMEM;
4502		goto error2;
4503	}
4504
4505	ret = ib_register_client(&cm_client);
4506	if (ret)
4507		goto error3;
4508
4509	return 0;
4510error3:
4511	destroy_workqueue(cm.wq);
4512error2:
4513	return ret;
4514}
4515
4516static void __exit ib_cm_cleanup(void)
4517{
4518	struct cm_timewait_info *timewait_info, *tmp;
4519
4520	spin_lock_irq(&cm.lock);
4521	list_for_each_entry(timewait_info, &cm.timewait_list, list)
4522		cancel_delayed_work(&timewait_info->work.work);
4523	spin_unlock_irq(&cm.lock);
4524
4525	ib_unregister_client(&cm_client);
4526	destroy_workqueue(cm.wq);
4527
4528	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4529		list_del(&timewait_info->list);
4530		kfree(timewait_info);
4531	}
4532
4533	WARN_ON(!xa_empty(&cm.local_id_table));
4534}
4535
4536module_init(ib_cm_init);
4537module_exit(ib_cm_cleanup);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   5 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
   6 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   7 * Copyright (c) 2019, Mellanox Technologies inc.  All rights reserved.
   8 */
   9
  10#include <linux/completion.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/device.h>
  13#include <linux/module.h>
  14#include <linux/err.h>
  15#include <linux/idr.h>
  16#include <linux/interrupt.h>
  17#include <linux/random.h>
  18#include <linux/rbtree.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/sysfs.h>
  22#include <linux/workqueue.h>
  23#include <linux/kdev_t.h>
  24#include <linux/etherdevice.h>
  25
  26#include <rdma/ib_cache.h>
  27#include <rdma/ib_cm.h>
  28#include <rdma/ib_sysfs.h>
  29#include "cm_msgs.h"
  30#include "core_priv.h"
  31#include "cm_trace.h"
  32
  33MODULE_AUTHOR("Sean Hefty");
  34MODULE_DESCRIPTION("InfiniBand CM");
  35MODULE_LICENSE("Dual BSD/GPL");
  36
  37static const char * const ibcm_rej_reason_strs[] = {
  38	[IB_CM_REJ_NO_QP]			= "no QP",
  39	[IB_CM_REJ_NO_EEC]			= "no EEC",
  40	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
  41	[IB_CM_REJ_TIMEOUT]			= "timeout",
  42	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
  43	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
  44	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
  45	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
  46	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
  47	[IB_CM_REJ_STALE_CONN]			= "stale conn",
  48	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
  49	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
  50	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
  51	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
  52	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
  53	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
  54	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
  55	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
  56	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
  57	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
  58	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
  59	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
  60	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
  61	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
  62	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
  63	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
  64	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
  65	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
  66	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
  67	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
  68	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
  69	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
  70	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
  71	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
  72		"vendor option is not supported",
  73};
  74
  75const char *__attribute_const__ ibcm_reject_msg(int reason)
  76{
  77	size_t index = reason;
  78
  79	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
  80	    ibcm_rej_reason_strs[index])
  81		return ibcm_rej_reason_strs[index];
  82	else
  83		return "unrecognized reason";
  84}
  85EXPORT_SYMBOL(ibcm_reject_msg);
  86
  87struct cm_id_private;
  88struct cm_work;
  89static int cm_add_one(struct ib_device *device);
  90static void cm_remove_one(struct ib_device *device, void *client_data);
  91static void cm_process_work(struct cm_id_private *cm_id_priv,
  92			    struct cm_work *work);
  93static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
  94				   struct ib_cm_sidr_rep_param *param);
  95static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
  96			       const void *private_data, u8 private_data_len);
  97static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
  98			       void *private_data, u8 private_data_len);
  99static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 100			      enum ib_cm_rej_reason reason, void *ari,
 101			      u8 ari_length, const void *private_data,
 102			      u8 private_data_len);
 103
 104static struct ib_client cm_client = {
 105	.name   = "cm",
 106	.add    = cm_add_one,
 107	.remove = cm_remove_one
 108};
 109
 110static struct ib_cm {
 111	spinlock_t lock;
 112	struct list_head device_list;
 113	rwlock_t device_lock;
 114	struct rb_root listen_service_table;
 115	u64 listen_service_id;
 116	/* struct rb_root peer_service_table; todo: fix peer to peer */
 117	struct rb_root remote_qp_table;
 118	struct rb_root remote_id_table;
 119	struct rb_root remote_sidr_table;
 120	struct xarray local_id_table;
 121	u32 local_id_next;
 122	__be32 random_id_operand;
 123	struct list_head timewait_list;
 124	struct workqueue_struct *wq;
 125} cm;
 126
 127/* Counter indexes ordered by attribute ID */
 128enum {
 129	CM_REQ_COUNTER,
 130	CM_MRA_COUNTER,
 131	CM_REJ_COUNTER,
 132	CM_REP_COUNTER,
 133	CM_RTU_COUNTER,
 134	CM_DREQ_COUNTER,
 135	CM_DREP_COUNTER,
 136	CM_SIDR_REQ_COUNTER,
 137	CM_SIDR_REP_COUNTER,
 138	CM_LAP_COUNTER,
 139	CM_APR_COUNTER,
 140	CM_ATTR_COUNT,
 141	CM_ATTR_ID_OFFSET = 0x0010,
 142};
 143
 144enum {
 145	CM_XMIT,
 146	CM_XMIT_RETRIES,
 147	CM_RECV,
 148	CM_RECV_DUPLICATES,
 149	CM_COUNTER_GROUPS
 150};
 151
 152struct cm_counter_attribute {
 153	struct ib_port_attribute attr;
 154	unsigned short group;
 155	unsigned short index;
 156};
 157
 158struct cm_port {
 159	struct cm_device *cm_dev;
 160	struct ib_mad_agent *mad_agent;
 161	u32 port_num;
 162	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
 163};
 164
 165struct cm_device {
 166	struct kref kref;
 167	struct list_head list;
 168	spinlock_t mad_agent_lock;
 169	struct ib_device *ib_device;
 170	u8 ack_delay;
 171	int going_down;
 172	struct cm_port *port[];
 173};
 174
 175struct cm_av {
 176	struct cm_port *port;
 177	struct rdma_ah_attr ah_attr;
 
 178	u16 pkey_index;
 179	u8 timeout;
 180};
 181
 182struct cm_work {
 183	struct delayed_work work;
 184	struct list_head list;
 185	struct cm_port *port;
 186	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
 187	__be32 local_id;			/* Established / timewait */
 188	__be32 remote_id;
 189	struct ib_cm_event cm_event;
 190	struct sa_path_rec path[];
 191};
 192
 193struct cm_timewait_info {
 194	struct cm_work work;
 195	struct list_head list;
 196	struct rb_node remote_qp_node;
 197	struct rb_node remote_id_node;
 198	__be64 remote_ca_guid;
 199	__be32 remote_qpn;
 200	u8 inserted_remote_qp;
 201	u8 inserted_remote_id;
 202};
 203
 204struct cm_id_private {
 205	struct ib_cm_id	id;
 206
 207	struct rb_node service_node;
 208	struct rb_node sidr_id_node;
 209	u32 sidr_slid;
 210	spinlock_t lock;	/* Do not acquire inside cm.lock */
 211	struct completion comp;
 212	refcount_t refcount;
 213	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
 214	 * Protected by the cm.lock spinlock.
 215	 */
 216	int listen_sharecount;
 217	struct rcu_head rcu;
 218
 219	struct ib_mad_send_buf *msg;
 220	struct cm_timewait_info *timewait_info;
 221	/* todo: use alternate port on send failure */
 222	struct cm_av av;
 223	struct cm_av alt_av;
 224
 225	void *private_data;
 226	__be64 tid;
 227	__be32 local_qpn;
 228	__be32 remote_qpn;
 229	enum ib_qp_type qp_type;
 230	__be32 sq_psn;
 231	__be32 rq_psn;
 232	int timeout_ms;
 233	enum ib_mtu path_mtu;
 234	__be16 pkey;
 235	u8 private_data_len;
 236	u8 max_cm_retries;
 237	u8 responder_resources;
 238	u8 initiator_depth;
 239	u8 retry_count;
 240	u8 rnr_retry_count;
 241	u8 service_timeout;
 242	u8 target_ack_delay;
 243
 244	struct list_head work_list;
 245	atomic_t work_count;
 246
 247	struct rdma_ucm_ece ece;
 248};
 249
 250static void cm_dev_release(struct kref *kref)
 251{
 252	struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
 253	u32 i;
 254
 255	rdma_for_each_port(cm_dev->ib_device, i)
 256		kfree(cm_dev->port[i - 1]);
 257
 258	kfree(cm_dev);
 259}
 260
 261static void cm_device_put(struct cm_device *cm_dev)
 262{
 263	kref_put(&cm_dev->kref, cm_dev_release);
 264}
 265
 266static void cm_work_handler(struct work_struct *work);
 267
 268static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 269{
 270	if (refcount_dec_and_test(&cm_id_priv->refcount))
 271		complete(&cm_id_priv->comp);
 272}
 273
 274static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
 275{
 276	struct ib_mad_agent *mad_agent;
 277	struct ib_mad_send_buf *m;
 278	struct ib_ah *ah;
 279
 280	lockdep_assert_held(&cm_id_priv->lock);
 281
 282	if (!cm_id_priv->av.port)
 283		return ERR_PTR(-EINVAL);
 284
 285	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
 286	mad_agent = cm_id_priv->av.port->mad_agent;
 287	if (!mad_agent) {
 288		m = ERR_PTR(-EINVAL);
 289		goto out;
 290	}
 291
 292	ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
 293	if (IS_ERR(ah)) {
 294		m = ERR_CAST(ah);
 295		goto out;
 296	}
 297
 298	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
 299			       cm_id_priv->av.pkey_index,
 300			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 301			       GFP_ATOMIC,
 302			       IB_MGMT_BASE_VERSION);
 303	if (IS_ERR(m)) {
 304		rdma_destroy_ah(ah, 0);
 305		goto out;
 306	}
 307
 308	/* Timeout set by caller if response is expected. */
 309	m->ah = ah;
 310	m->retries = cm_id_priv->max_cm_retries;
 311
 312	refcount_inc(&cm_id_priv->refcount);
 313	m->context[0] = cm_id_priv;
 314
 315out:
 316	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
 317	return m;
 318}
 319
 320static void cm_free_msg(struct ib_mad_send_buf *msg)
 321{
 322	struct cm_id_private *cm_id_priv = msg->context[0];
 323
 324	if (msg->ah)
 325		rdma_destroy_ah(msg->ah, 0);
 326	cm_deref_id(cm_id_priv);
 327	ib_free_send_mad(msg);
 328}
 329
 330static struct ib_mad_send_buf *
 331cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
 332{
 333	struct ib_mad_send_buf *msg;
 334
 335	lockdep_assert_held(&cm_id_priv->lock);
 336
 337	msg = cm_alloc_msg(cm_id_priv);
 338	if (IS_ERR(msg))
 339		return msg;
 340	cm_id_priv->msg = msg;
 341	return msg;
 342}
 343
 344static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
 345{
 346	struct cm_id_private *cm_id_priv = msg->context[0];
 347
 348	lockdep_assert_held(&cm_id_priv->lock);
 349
 350	if (!WARN_ON(cm_id_priv->msg != msg))
 351		cm_id_priv->msg = NULL;
 352
 353	if (msg->ah)
 354		rdma_destroy_ah(msg->ah, 0);
 355	cm_deref_id(cm_id_priv);
 356	ib_free_send_mad(msg);
 357}
 358
 359static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
 360							   struct ib_mad_recv_wc *mad_recv_wc)
 361{
 362	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
 363				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 364				  GFP_ATOMIC,
 365				  IB_MGMT_BASE_VERSION);
 366}
 367
 368static int cm_create_response_msg_ah(struct cm_port *port,
 369				     struct ib_mad_recv_wc *mad_recv_wc,
 370				     struct ib_mad_send_buf *msg)
 371{
 372	struct ib_ah *ah;
 373
 374	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
 375				  mad_recv_wc->recv_buf.grh, port->port_num);
 376	if (IS_ERR(ah))
 377		return PTR_ERR(ah);
 378
 379	msg->ah = ah;
 380	return 0;
 381}
 382
 383static int cm_alloc_response_msg(struct cm_port *port,
 384				 struct ib_mad_recv_wc *mad_recv_wc,
 385				 struct ib_mad_send_buf **msg)
 386{
 387	struct ib_mad_send_buf *m;
 388	int ret;
 389
 390	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
 391	if (IS_ERR(m))
 392		return PTR_ERR(m);
 393
 394	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
 395	if (ret) {
 396		ib_free_send_mad(m);
 397		return ret;
 398	}
 399
 400	*msg = m;
 401	return 0;
 402}
 403
 404static void cm_free_response_msg(struct ib_mad_send_buf *msg)
 405{
 406	if (msg->ah)
 407		rdma_destroy_ah(msg->ah, 0);
 408	ib_free_send_mad(msg);
 409}
 410
 411static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
 412{
 413	void *data;
 414
 415	if (!private_data || !private_data_len)
 416		return NULL;
 417
 418	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 419	if (!data)
 420		return ERR_PTR(-ENOMEM);
 421
 422	return data;
 423}
 424
 425static void cm_set_private_data(struct cm_id_private *cm_id_priv,
 426				 void *private_data, u8 private_data_len)
 427{
 428	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
 429		kfree(cm_id_priv->private_data);
 430
 431	cm_id_priv->private_data = private_data;
 432	cm_id_priv->private_data_len = private_data_len;
 433}
 434
 435static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
 436{
 437	struct cm_port *old_port = av->port;
 438
 439	if (old_port == port)
 440		return;
 441
 442	av->port = port;
 443	if (old_port)
 444		cm_device_put(old_port->cm_dev);
 445	if (port)
 446		kref_get(&port->cm_dev->kref);
 447}
 448
 449static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
 450			       struct rdma_ah_attr *ah_attr, struct cm_av *av)
 451{
 452	cm_set_av_port(av, port);
 453	av->pkey_index = wc->pkey_index;
 454	rdma_move_ah_attr(&av->ah_attr, ah_attr);
 455}
 456
 457static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 458				   struct ib_grh *grh, struct cm_av *av)
 459{
 460	cm_set_av_port(av, port);
 461	av->pkey_index = wc->pkey_index;
 462	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
 463				       port->port_num, wc,
 464				       grh, &av->ah_attr);
 465}
 466
 467static struct cm_port *
 468get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
 469{
 470	struct cm_device *cm_dev;
 471	struct cm_port *port = NULL;
 472	unsigned long flags;
 473
 474	if (attr) {
 475		read_lock_irqsave(&cm.device_lock, flags);
 476		list_for_each_entry(cm_dev, &cm.device_list, list) {
 477			if (cm_dev->ib_device == attr->device) {
 478				port = cm_dev->port[attr->port_num - 1];
 479				break;
 480			}
 481		}
 482		read_unlock_irqrestore(&cm.device_lock, flags);
 483	} else {
 484		/* SGID attribute can be NULL in following
 485		 * conditions.
 486		 * (a) Alternative path
 487		 * (b) IB link layer without GRH
 488		 * (c) LAP send messages
 489		 */
 490		read_lock_irqsave(&cm.device_lock, flags);
 491		list_for_each_entry(cm_dev, &cm.device_list, list) {
 492			attr = rdma_find_gid(cm_dev->ib_device,
 493					     &path->sgid,
 494					     sa_conv_pathrec_to_gid_type(path),
 495					     NULL);
 496			if (!IS_ERR(attr)) {
 497				port = cm_dev->port[attr->port_num - 1];
 498				break;
 499			}
 500		}
 501		read_unlock_irqrestore(&cm.device_lock, flags);
 502		if (port)
 503			rdma_put_gid_attr(attr);
 504	}
 505	return port;
 506}
 507
 508static int cm_init_av_by_path(struct sa_path_rec *path,
 509			      const struct ib_gid_attr *sgid_attr,
 510			      struct cm_av *av)
 511{
 512	struct rdma_ah_attr new_ah_attr;
 513	struct cm_device *cm_dev;
 514	struct cm_port *port;
 515	int ret;
 516
 517	port = get_cm_port_from_path(path, sgid_attr);
 518	if (!port)
 519		return -EINVAL;
 520	cm_dev = port->cm_dev;
 521
 522	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
 523				  be16_to_cpu(path->pkey), &av->pkey_index);
 524	if (ret)
 525		return ret;
 526
 527	cm_set_av_port(av, port);
 528
 529	/*
 530	 * av->ah_attr might be initialized based on wc or during
 531	 * request processing time which might have reference to sgid_attr.
 532	 * So initialize a new ah_attr on stack.
 533	 * If initialization fails, old ah_attr is used for sending any
 534	 * responses. If initialization is successful, than new ah_attr
 535	 * is used by overwriting the old one. So that right ah_attr
 536	 * can be used to return an error response.
 537	 */
 538	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
 539					&new_ah_attr, sgid_attr);
 540	if (ret)
 541		return ret;
 542
 543	av->timeout = path->packet_life_time + 1;
 544	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
 545	return 0;
 546}
 547
 548/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
 549static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
 550{
 551	cm_set_av_port(dest, src->port);
 552	cm_set_av_port(src, NULL);
 553	dest->pkey_index = src->pkey_index;
 554	rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
 555	dest->timeout = src->timeout;
 556}
 557
 558static void cm_destroy_av(struct cm_av *av)
 559{
 560	rdma_destroy_ah_attr(&av->ah_attr);
 561	cm_set_av_port(av, NULL);
 562}
 563
 564static u32 cm_local_id(__be32 local_id)
 565{
 566	return (__force u32) (local_id ^ cm.random_id_operand);
 567}
 568
 569static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
 570{
 571	struct cm_id_private *cm_id_priv;
 572
 573	rcu_read_lock();
 574	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
 575	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
 576	    !refcount_inc_not_zero(&cm_id_priv->refcount))
 577		cm_id_priv = NULL;
 578	rcu_read_unlock();
 579
 580	return cm_id_priv;
 581}
 582
 583/*
 584 * Trivial helpers to strip endian annotation and compare; the
 585 * endianness doesn't actually matter since we just need a stable
 586 * order for the RB tree.
 587 */
 588static int be32_lt(__be32 a, __be32 b)
 589{
 590	return (__force u32) a < (__force u32) b;
 591}
 592
 593static int be32_gt(__be32 a, __be32 b)
 594{
 595	return (__force u32) a > (__force u32) b;
 596}
 597
 598static int be64_lt(__be64 a, __be64 b)
 599{
 600	return (__force u64) a < (__force u64) b;
 601}
 602
 603static int be64_gt(__be64 a, __be64 b)
 604{
 605	return (__force u64) a > (__force u64) b;
 606}
 607
 608/*
 609 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 610 * if the new ID was inserted, NULL if it could not be inserted due to a
 611 * collision, or the existing cm_id_priv ready for shared usage.
 612 */
 613static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
 614					      ib_cm_handler shared_handler)
 615{
 616	struct rb_node **link = &cm.listen_service_table.rb_node;
 617	struct rb_node *parent = NULL;
 618	struct cm_id_private *cur_cm_id_priv;
 619	__be64 service_id = cm_id_priv->id.service_id;
 620	__be64 service_mask = cm_id_priv->id.service_mask;
 621	unsigned long flags;
 622
 623	spin_lock_irqsave(&cm.lock, flags);
 624	while (*link) {
 625		parent = *link;
 626		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 627					  service_node);
 628		if ((cur_cm_id_priv->id.service_mask & service_id) ==
 629		    (service_mask & cur_cm_id_priv->id.service_id) &&
 630		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
 
 
 
 
 
 
 
 631			/*
 632			 * Sharing an ib_cm_id with different handlers is not
 633			 * supported
 634			 */
 635			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
 636			    cur_cm_id_priv->id.context ||
 637			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
 638				spin_unlock_irqrestore(&cm.lock, flags);
 639				return NULL;
 640			}
 641			refcount_inc(&cur_cm_id_priv->refcount);
 642			cur_cm_id_priv->listen_sharecount++;
 643			spin_unlock_irqrestore(&cm.lock, flags);
 644			return cur_cm_id_priv;
 645		}
 646
 647		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
 648			link = &(*link)->rb_left;
 649		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
 650			link = &(*link)->rb_right;
 651		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
 652			link = &(*link)->rb_left;
 653		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
 654			link = &(*link)->rb_right;
 655		else
 656			link = &(*link)->rb_right;
 657	}
 658	cm_id_priv->listen_sharecount++;
 659	rb_link_node(&cm_id_priv->service_node, parent, link);
 660	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
 661	spin_unlock_irqrestore(&cm.lock, flags);
 662	return cm_id_priv;
 663}
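/*
 * Editor's note: a worked example of the wildcard match above, with
 * hypothetical values.  Suppose an existing listener has
 *
 *	service_id   = 0x1000
 *	service_mask = ~0xfffULL	(low 12 bits wildcarded)
 *
 * and a new listen arrives for service_id 0x1abc with mask ~0 (the value
 * the ib_cm_insert_listen() path uses).  Then (cur->mask & 0x1abc) ==
 * 0x1000 and (new->mask & cur->service_id) == 0x1000, so the condition
 * matches; if the device also matches, the node is shared (or the insert
 * refused on a handler mismatch) instead of descending further.
 */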
 664
 665static struct cm_id_private *cm_find_listen(struct ib_device *device,
 666					    __be64 service_id)
 667{
 668	struct rb_node *node = cm.listen_service_table.rb_node;
 669	struct cm_id_private *cm_id_priv;
 670
 671	while (node) {
 672		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
 673		if ((cm_id_priv->id.service_mask & service_id) ==
 674		     cm_id_priv->id.service_id &&
 675		    (cm_id_priv->id.device == device)) {
 676			refcount_inc(&cm_id_priv->refcount);
 677			return cm_id_priv;
 678		}
 679		if (device < cm_id_priv->id.device)
 680			node = node->rb_left;
 681		else if (device > cm_id_priv->id.device)
 682			node = node->rb_right;
 683		else if (be64_lt(service_id, cm_id_priv->id.service_id))
 684			node = node->rb_left;
 685		else if (be64_gt(service_id, cm_id_priv->id.service_id))
 686			node = node->rb_right;
 687		else
 688			node = node->rb_right;
 689	}
 690	return NULL;
 691}
 692
 693static struct cm_timewait_info *
 694cm_insert_remote_id(struct cm_timewait_info *timewait_info)
 695{
 696	struct rb_node **link = &cm.remote_id_table.rb_node;
 697	struct rb_node *parent = NULL;
 698	struct cm_timewait_info *cur_timewait_info;
 699	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
 700	__be32 remote_id = timewait_info->work.remote_id;
 701
 702	while (*link) {
 703		parent = *link;
 704		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 705					     remote_id_node);
 706		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
 707			link = &(*link)->rb_left;
 708		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
 709			link = &(*link)->rb_right;
 710		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 711			link = &(*link)->rb_left;
 712		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 713			link = &(*link)->rb_right;
 714		else
 715			return cur_timewait_info;
 716	}
 717	timewait_info->inserted_remote_id = 1;
 718	rb_link_node(&timewait_info->remote_id_node, parent, link);
 719	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
 720	return NULL;
 721}
 722
 723static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
 724					       __be32 remote_id)
 725{
 726	struct rb_node *node = cm.remote_id_table.rb_node;
 727	struct cm_timewait_info *timewait_info;
 728	struct cm_id_private *res = NULL;
 729
 730	spin_lock_irq(&cm.lock);
 731	while (node) {
 732		timewait_info = rb_entry(node, struct cm_timewait_info,
 733					 remote_id_node);
 734		if (be32_lt(remote_id, timewait_info->work.remote_id))
 735			node = node->rb_left;
 736		else if (be32_gt(remote_id, timewait_info->work.remote_id))
 737			node = node->rb_right;
 738		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
 739			node = node->rb_left;
 740		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
 741			node = node->rb_right;
 742		else {
 743			res = cm_acquire_id(timewait_info->work.local_id,
 744					     timewait_info->work.remote_id);
 745			break;
 746		}
 747	}
 748	spin_unlock_irq(&cm.lock);
 749	return res;
 750}
 751
 752static struct cm_timewait_info *
 753cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
 754{
 755	struct rb_node **link = &cm.remote_qp_table.rb_node;
 756	struct rb_node *parent = NULL;
 757	struct cm_timewait_info *cur_timewait_info;
 758	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
 759	__be32 remote_qpn = timewait_info->remote_qpn;
 760
 761	while (*link) {
 762		parent = *link;
 763		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 764					     remote_qp_node);
 765		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
 766			link = &(*link)->rb_left;
 767		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
 768			link = &(*link)->rb_right;
 769		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 770			link = &(*link)->rb_left;
 771		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 772			link = &(*link)->rb_right;
 773		else
 774			return cur_timewait_info;
 775	}
 776	timewait_info->inserted_remote_qp = 1;
 777	rb_link_node(&timewait_info->remote_qp_node, parent, link);
 778	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
 779	return NULL;
 780}
 781
 782static struct cm_id_private *
 783cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
 784{
 785	struct rb_node **link = &cm.remote_sidr_table.rb_node;
 786	struct rb_node *parent = NULL;
 787	struct cm_id_private *cur_cm_id_priv;
 788	__be32 remote_id = cm_id_priv->id.remote_id;
 789
 790	while (*link) {
 791		parent = *link;
 792		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 793					  sidr_id_node);
 794		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
 795			link = &(*link)->rb_left;
 796		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
 797			link = &(*link)->rb_right;
 798		else {
 799			if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
 800				link = &(*link)->rb_left;
 801			else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
 802				link = &(*link)->rb_right;
 803			else
 804				return cur_cm_id_priv;
 805		}
 806	}
 807	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
 808	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 809	return NULL;
 810}
 811
 812static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 813					      ib_cm_handler cm_handler,
 814					      void *context)
 815{
 816	struct cm_id_private *cm_id_priv;
 817	u32 id;
 818	int ret;
 819
 820	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
 821	if (!cm_id_priv)
 822		return ERR_PTR(-ENOMEM);
 823
 824	cm_id_priv->id.state = IB_CM_IDLE;
 825	cm_id_priv->id.device = device;
 826	cm_id_priv->id.cm_handler = cm_handler;
 827	cm_id_priv->id.context = context;
 828	cm_id_priv->id.remote_cm_qpn = 1;
 829
 830	RB_CLEAR_NODE(&cm_id_priv->service_node);
 831	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 832	spin_lock_init(&cm_id_priv->lock);
 833	init_completion(&cm_id_priv->comp);
 834	INIT_LIST_HEAD(&cm_id_priv->work_list);
 835	atomic_set(&cm_id_priv->work_count, -1);
 836	refcount_set(&cm_id_priv->refcount, 1);
 837
 838	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
 839			      &cm.local_id_next, GFP_KERNEL);
 840	if (ret < 0)
 841		goto error;
 842	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
 843
 844	return cm_id_priv;
 845
 846error:
 847	kfree(cm_id_priv);
 848	return ERR_PTR(ret);
 849}
 850
 851/*
 852 * Make the ID visible to the MAD handlers and other threads that use the
 853 * xarray.
 854 */
 855static void cm_finalize_id(struct cm_id_private *cm_id_priv)
 856{
 857	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
 858		 cm_id_priv, GFP_ATOMIC);
 859}
 860
 861struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 862				 ib_cm_handler cm_handler,
 863				 void *context)
 864{
 865	struct cm_id_private *cm_id_priv;
 866
 867	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
 868	if (IS_ERR(cm_id_priv))
 869		return ERR_CAST(cm_id_priv);
 870
 871	cm_finalize_id(cm_id_priv);
 872	return &cm_id_priv->id;
 873}
 874EXPORT_SYMBOL(ib_create_cm_id);
 875
 876static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
 877{
 878	struct cm_work *work;
 879
 880	if (list_empty(&cm_id_priv->work_list))
 881		return NULL;
 882
 883	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
 884	list_del(&work->list);
 885	return work;
 886}
 887
 888static void cm_free_work(struct cm_work *work)
 889{
 890	if (work->mad_recv_wc)
 891		ib_free_recv_mad(work->mad_recv_wc);
 892	kfree(work);
 893}
 894
 895static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
 896				 struct cm_work *work)
 897	__releases(&cm_id_priv->lock)
 898{
 899	bool immediate;
 900
 901	/*
 902	 * To deliver the event to the user callback we have to drop the
 903	 * spinlock; however, we need to ensure that the user callback is
 904	 * single threaded and receives events in temporal order. If there
 905	 * are already events being processed, queue new events onto a list
 906	 * and the thread currently processing will pick them up.
 907	 */
 908	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
 909	if (!immediate) {
 910		list_add_tail(&work->list, &cm_id_priv->work_list);
 911		/*
 912		 * This routine always consumes the incoming reference. Once
 913		 * the work is queued on the work_list, a reference is already
 914		 * held by the thread currently running cm_process_work(), so
 915		 * this one is not needed.
 916		 */
 917		cm_deref_id(cm_id_priv);
 918	}
 919	spin_unlock_irq(&cm_id_priv->lock);
 920
 921	if (immediate)
 922		cm_process_work(cm_id_priv, work);
 923}
 924
 925static inline int cm_convert_to_ms(int iba_time)
 926{
 927	/* approximate conversion to ms from 4.096us x 2^iba_time */
 928	return 1 << max(iba_time - 8, 0);
 929}
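/*
 * Editor's note: a worked example of the approximation above.  IBA encodes
 * time as 4.096us * 2^iba_time; treating 4.096us * 2^8 (~1.05ms) as 1ms
 * gives 1 << (iba_time - 8) milliseconds:
 *
 *	iba_time =  8  ->  1 << 0  = 1 ms      (exact: ~1.05 ms)
 *	iba_time = 14  ->  1 << 6  = 64 ms     (exact: ~67.1 ms)
 *	iba_time = 20  ->  1 << 12 = 4096 ms   (exact: ~4295 ms)
 *
 * Values below 8 clamp to 1ms via the max(iba_time - 8, 0).
 */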
 930
 931/*
 932 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 933 * Because of how ack_timeout is stored, adding one doubles the timeout.
 934 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 935 * increment it (round up) only if the other is within 50%.
 936 */
 937static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
 938{
 939	int ack_timeout = packet_life_time + 1;
 940
 941	if (ack_timeout >= ca_ack_delay)
 942		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
 943	else
 944		ack_timeout = ca_ack_delay +
 945			      (ack_timeout >= (ca_ack_delay - 1));
 946
 947	return min(31, ack_timeout);
 948}
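/*
 * Editor's note: a worked example of the rounding rule above.  The exact
 * sum 2^ack_delay + 2 * 2^life_time rarely lands on a power of two, so the
 * code takes the larger exponent and rounds up by one step only when the
 * smaller term is within 50% of it:
 *
 *	packet_life_time = 10, ca_ack_delay = 10:
 *		ack_timeout = 11; 10 >= 11 - 1, so return 12
 *	packet_life_time = 10, ca_ack_delay = 4:
 *		ack_timeout = 11; 4 < 11 - 1, so return 11
 *
 * The result is clamped to 31, the largest value the 5-bit field holds.
 */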
 949
 950static void cm_remove_remote(struct cm_id_private *cm_id_priv)
 951{
 952	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
 953
 954	if (timewait_info->inserted_remote_id) {
 955		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
 956		timewait_info->inserted_remote_id = 0;
 957	}
 958
 959	if (timewait_info->inserted_remote_qp) {
 960		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
 961		timewait_info->inserted_remote_qp = 0;
 962	}
 963}
 964
 965static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
 966{
 967	struct cm_timewait_info *timewait_info;
 968
 969	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
 970	if (!timewait_info)
 971		return ERR_PTR(-ENOMEM);
 972
 973	timewait_info->work.local_id = local_id;
 974	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
 975	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
 976	return timewait_info;
 977}
 978
 979static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 980{
 981	int wait_time;
 982	unsigned long flags;
 983	struct cm_device *cm_dev;
 984
 985	lockdep_assert_held(&cm_id_priv->lock);
 986
 987	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
 988	if (!cm_dev)
 989		return;
 990
 991	spin_lock_irqsave(&cm.lock, flags);
 992	cm_remove_remote(cm_id_priv);
 993	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
 994	spin_unlock_irqrestore(&cm.lock, flags);
 995
 996	/*
 997	 * The cm_id could be destroyed by the user before we exit timewait.
 998	 * To protect against this, we search for the cm_id after exiting
 999	 * timewait before notifying the user that we've exited timewait.
1000	 */
1001	cm_id_priv->id.state = IB_CM_TIMEWAIT;
1002	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
1003
1004	/* Check if the device started its remove_one */
1005	spin_lock_irqsave(&cm.lock, flags);
1006	if (!cm_dev->going_down)
1007		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
1008				   msecs_to_jiffies(wait_time));
1009	spin_unlock_irqrestore(&cm.lock, flags);
1010
1011	/*
1012	 * The timewait_info is converted into a work and gets freed during
1013	 * cm_free_work() in cm_timewait_handler().
1014	 */
1015	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
1016	cm_id_priv->timewait_info = NULL;
1017}
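/*
 * Editor's note: the BUILD_BUG_ON above is what makes the handoff legal.
 * Because 'work' is the first member of struct cm_timewait_info, the two
 * pointers alias, so a sketch of the conversion and teardown is simply:
 *
 *	struct cm_timewait_info *ti = cm_id_priv->timewait_info;
 *	struct cm_work *w = &ti->work;		// same address as ti
 *	...
 *	kfree(w);	// in cm_free_work(); frees the whole timewait_info
 *
 * If the member ever moved to a non-zero offset, that kfree() would pass
 * a pointer the allocator never returned, hence the compile-time check.
 */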
1018
1019static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
1020{
1021	unsigned long flags;
1022
1023	lockdep_assert_held(&cm_id_priv->lock);
1024
1025	cm_id_priv->id.state = IB_CM_IDLE;
1026	if (cm_id_priv->timewait_info) {
1027		spin_lock_irqsave(&cm.lock, flags);
1028		cm_remove_remote(cm_id_priv);
1029		spin_unlock_irqrestore(&cm.lock, flags);
1030		kfree(cm_id_priv->timewait_info);
1031		cm_id_priv->timewait_info = NULL;
1032	}
1033}
1034
1035static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1036{
1037	struct cm_id_private *cm_id_priv;
1038	struct cm_work *work;
1039
1040	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1041	spin_lock_irq(&cm_id_priv->lock);
1042retest:
1043	switch (cm_id->state) {
1044	case IB_CM_LISTEN:
1045		spin_lock(&cm.lock);
1046		if (--cm_id_priv->listen_sharecount > 0) {
1047			/* The id is still shared. */
1048			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
1049			spin_unlock(&cm.lock);
1050			spin_unlock_irq(&cm_id_priv->lock);
1051			cm_deref_id(cm_id_priv);
1052			return;
1053		}
1054		cm_id->state = IB_CM_IDLE;
1055		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1056		RB_CLEAR_NODE(&cm_id_priv->service_node);
1057		spin_unlock(&cm.lock);
1058		break;
1059	case IB_CM_SIDR_REQ_SENT:
1060		cm_id->state = IB_CM_IDLE;
1061		ib_cancel_mad(cm_id_priv->msg);
1062		break;
1063	case IB_CM_SIDR_REQ_RCVD:
1064		cm_send_sidr_rep_locked(cm_id_priv,
1065					&(struct ib_cm_sidr_rep_param){
1066						.status = IB_SIDR_REJECT });
1067		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
1068		cm_id->state = IB_CM_IDLE;
1069		break;
1070	case IB_CM_REQ_SENT:
1071	case IB_CM_MRA_REQ_RCVD:
1072		ib_cancel_mad(cm_id_priv->msg);
1073		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
1074				   &cm_id_priv->id.device->node_guid,
1075				   sizeof(cm_id_priv->id.device->node_guid),
1076				   NULL, 0);
1077		break;
1078	case IB_CM_REQ_RCVD:
1079		if (err == -ENOMEM) {
1080			/* Do not reject to allow future retries. */
1081			cm_reset_to_idle(cm_id_priv);
1082		} else {
1083			cm_send_rej_locked(cm_id_priv,
1084					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1085					   NULL, 0);
1086		}
1087		break;
1088	case IB_CM_REP_SENT:
1089	case IB_CM_MRA_REP_RCVD:
1090		ib_cancel_mad(cm_id_priv->msg);
1091		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1092				   0, NULL, 0);
1093		goto retest;
1094	case IB_CM_MRA_REQ_SENT:
1095	case IB_CM_REP_RCVD:
1096	case IB_CM_MRA_REP_SENT:
1097		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1098				   0, NULL, 0);
1099		break;
1100	case IB_CM_ESTABLISHED:
1101		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
1102			cm_id->state = IB_CM_IDLE;
1103			break;
1104		}
1105		cm_send_dreq_locked(cm_id_priv, NULL, 0);
1106		goto retest;
1107	case IB_CM_DREQ_SENT:
1108		ib_cancel_mad(cm_id_priv->msg);
1109		cm_enter_timewait(cm_id_priv);
1110		goto retest;
1111	case IB_CM_DREQ_RCVD:
1112		cm_send_drep_locked(cm_id_priv, NULL, 0);
1113		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
1114		goto retest;
1115	case IB_CM_TIMEWAIT:
1116		/*
1117		 * The cm_acquire_id in cm_timewait_handler will stop working
1118		 * once we do xa_erase below, so just move to idle here for
1119		 * consistency.
1120		 */
1121		cm_id->state = IB_CM_IDLE;
1122		break;
1123	case IB_CM_IDLE:
1124		break;
1125	}
1126	WARN_ON(cm_id->state != IB_CM_IDLE);
1127
1128	spin_lock(&cm.lock);
 1129	/* Required for cleanup paths related to cm_req_handler() */
1130	if (cm_id_priv->timewait_info) {
1131		cm_remove_remote(cm_id_priv);
1132		kfree(cm_id_priv->timewait_info);
1133		cm_id_priv->timewait_info = NULL;
1134	}
1135
1136	WARN_ON(cm_id_priv->listen_sharecount);
1137	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
1138	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1139		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
1140	spin_unlock(&cm.lock);
1141	spin_unlock_irq(&cm_id_priv->lock);
1142
1143	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
1144	cm_deref_id(cm_id_priv);
1145	wait_for_completion(&cm_id_priv->comp);
1146	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1147		cm_free_work(work);
1148
1149	cm_destroy_av(&cm_id_priv->av);
1150	cm_destroy_av(&cm_id_priv->alt_av);
1151	kfree(cm_id_priv->private_data);
1152	kfree_rcu(cm_id_priv, rcu);
1153}
1154
1155void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1156{
1157	cm_destroy_id(cm_id, 0);
1158}
1159EXPORT_SYMBOL(ib_destroy_cm_id);
1160
1161static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
1162			  __be64 service_mask)
1163{
1164	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1165	service_id &= service_mask;
1166	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1167	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
1168		return -EINVAL;
1169
1170	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1171		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
1172		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1173	} else {
1174		cm_id_priv->id.service_id = service_id;
1175		cm_id_priv->id.service_mask = service_mask;
1176	}
1177	return 0;
1178}
1179
1180/**
1181 * ib_cm_listen - Initiates listening on the specified service ID for
1182 *   connection and service ID resolution requests.
1183 * @cm_id: Connection identifier associated with the listen request.
1184 * @service_id: Service identifier matched against incoming connection
 1185 *   and service ID resolution requests.  The service ID should be specified
 1186 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1187 *   assign a service ID to the caller.
1188 * @service_mask: Mask applied to service ID used to listen across a
1189 *   range of service IDs.  If set to 0, the service ID is matched
1190 *   exactly.  This parameter is ignored if %service_id is set to
1191 *   IB_CM_ASSIGN_SERVICE_ID.
1192 */
1193int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
1194{
1195	struct cm_id_private *cm_id_priv =
1196		container_of(cm_id, struct cm_id_private, id);
1197	unsigned long flags;
1198	int ret;
1199
1200	spin_lock_irqsave(&cm_id_priv->lock, flags);
1201	if (cm_id_priv->id.state != IB_CM_IDLE) {
1202		ret = -EINVAL;
1203		goto out;
1204	}
1205
1206	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
1207	if (ret)
1208		goto out;
1209
1210	if (!cm_insert_listen(cm_id_priv, NULL)) {
1211		ret = -EBUSY;
1212		goto out;
1213	}
1214
1215	cm_id_priv->id.state = IB_CM_LISTEN;
1216	ret = 0;
1217
1218out:
1219	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1220	return ret;
1221}
1222EXPORT_SYMBOL(ib_cm_listen);
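/*
 * Editor's note: a minimal usage sketch for ib_create_cm_id() plus
 * ib_cm_listen(); the handler, context and service ID are hypothetical:
 *
 *	struct ib_cm_id *id;
 *	int ret;
 *
 *	id = ib_create_cm_id(device, my_handler, my_ctx);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, cpu_to_be64(0x1000), 0);
 *	if (ret)
 *		ib_destroy_cm_id(id);
 *
 * A zero mask requests an exact service-ID match, per the kernel-doc
 * above; listening on an already-claimed service ID fails with -EBUSY.
 */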
1223
1224/**
1225 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
1226 *			 the given service ID.
1227 *
1228 * If there's an existing ID listening on that same device and service ID,
1229 * return it.
1230 *
1231 * @device: Device associated with the cm_id.  All related communication will
1232 * be associated with the specified device.
1233 * @cm_handler: Callback invoked to notify the user of CM events.
1234 * @service_id: Service identifier matched against incoming connection
 1235 *   and service ID resolution requests.  The service ID should be specified
 1236 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1237 *   assign a service ID to the caller.
1238 *
1239 * Callers should call ib_destroy_cm_id when done with the listener ID.
1240 */
1241struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1242				     ib_cm_handler cm_handler,
1243				     __be64 service_id)
1244{
1245	struct cm_id_private *listen_id_priv;
1246	struct cm_id_private *cm_id_priv;
1247	int err = 0;
1248
1249	/* Create an ID in advance, since the creation may sleep */
1250	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
1251	if (IS_ERR(cm_id_priv))
1252		return ERR_CAST(cm_id_priv);
1253
1254	err = cm_init_listen(cm_id_priv, service_id, 0);
 1255	if (err) {
		ib_destroy_cm_id(&cm_id_priv->id);
 1256		return ERR_PTR(err);
	}
1257
1258	spin_lock_irq(&cm_id_priv->lock);
1259	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
1260	if (listen_id_priv != cm_id_priv) {
1261		spin_unlock_irq(&cm_id_priv->lock);
1262		ib_destroy_cm_id(&cm_id_priv->id);
1263		if (!listen_id_priv)
1264			return ERR_PTR(-EINVAL);
1265		return &listen_id_priv->id;
1266	}
1267	cm_id_priv->id.state = IB_CM_LISTEN;
1268	spin_unlock_irq(&cm_id_priv->lock);
1269
1270	/*
1271	 * A listen ID does not need to be in the xarray since it does not
1272	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
1273	 * and does not enter timewait.
1274	 */
1275
1276	return &cm_id_priv->id;
1277}
1278EXPORT_SYMBOL(ib_cm_insert_listen);
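/*
 * Editor's note: unlike ib_cm_listen(), this entry point deduplicates
 * listeners.  A sketch of two consumers sharing one service ID (the
 * handler is hypothetical and must be identical for sharing to work):
 *
 *	struct ib_cm_id *a = ib_cm_insert_listen(dev, shared_handler, sid);
 *	struct ib_cm_id *b = ib_cm_insert_listen(dev, shared_handler, sid);
 *	// here a == b: the second call only bumped listen_sharecount
 *	ib_destroy_cm_id(b);	// drops one share, listener stays alive
 *	ib_destroy_cm_id(a);	// last share, listener is torn down
 *
 * Requesting the same service ID with a different handler fails with
 * ERR_PTR(-EINVAL), per the "different handlers" check in
 * cm_insert_listen().
 */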
1279
1280static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
1281{
1282	u64 hi_tid = 0, low_tid;
1283
1284	lockdep_assert_held(&cm_id_priv->lock);
1285
1286	low_tid = (u64)cm_id_priv->id.local_id;
1287	if (!cm_id_priv->av.port)
1288		return cpu_to_be64(low_tid);
1289
1290	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1291	if (cm_id_priv->av.port->mad_agent)
1292		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1293	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1294	return cpu_to_be64(hi_tid | low_tid);
1295}
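/*
 * Editor's note: the transaction ID built above is just the MAD agent's
 * hi_tid in the upper 32 bits over the local comm ID in the lower 32:
 *
 *	  63             32 31              0
 *	 +-----------------+-----------------+
 *	 |  agent hi_tid   |    local_id     |
 *	 +-----------------+-----------------+
 *
 * If the port or its mad_agent is already gone (device removal in
 * progress), the high half is simply 0, which still yields a usable TID.
 */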
1296
1297static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1298			      __be16 attr_id, __be64 tid)
1299{
1300	hdr->base_version  = IB_MGMT_BASE_VERSION;
1301	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
1302	hdr->class_version = IB_CM_CLASS_VERSION;
1303	hdr->method	   = IB_MGMT_METHOD_SEND;
1304	hdr->attr_id	   = attr_id;
1305	hdr->tid	   = tid;
1306}
1307
1308static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
1309				  __be64 tid, u32 attr_mod)
1310{
1311	cm_format_mad_hdr(hdr, attr_id, tid);
1312	hdr->attr_mod = cpu_to_be32(attr_mod);
1313}
1314
1315static void cm_format_req(struct cm_req_msg *req_msg,
1316			  struct cm_id_private *cm_id_priv,
1317			  struct ib_cm_req_param *param)
1318{
1319	struct sa_path_rec *pri_path = param->primary_path;
1320	struct sa_path_rec *alt_path = param->alternate_path;
1321	bool pri_ext = false;
1322
1323	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1324		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1325					      pri_path->opa.slid);
1326
1327	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1328			      cm_form_tid(cm_id_priv), param->ece.attr_mod);
1329
1330	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
1331		be32_to_cpu(cm_id_priv->id.local_id));
1332	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
1333	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
1334		be64_to_cpu(cm_id_priv->id.device->node_guid));
1335	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
1336	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
1337	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
1338		param->remote_cm_response_timeout);
1339	cm_req_set_qp_type(req_msg, param->qp_type);
1340	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
1341	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
1342	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
1343		param->local_cm_response_timeout);
1344	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
1345		be16_to_cpu(param->primary_path->pkey));
1346	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
1347		param->primary_path->mtu);
1348	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
1349
1350	if (param->qp_type != IB_QPT_XRC_INI) {
1351		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
1352			param->responder_resources);
1353		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
1354		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
1355			param->rnr_retry_count);
1356		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
1357	}
1358
1359	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
1360		pri_path->sgid;
1361	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
1362		pri_path->dgid;
1363	if (pri_ext) {
1364		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
1365			->global.interface_id =
1366			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1367		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
1368			->global.interface_id =
1369			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1370	}
1371	if (pri_path->hop_limit <= 1) {
1372		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1373			be16_to_cpu(pri_ext ? 0 :
1374					      htons(ntohl(sa_path_get_slid(
1375						      pri_path)))));
1376		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1377			be16_to_cpu(pri_ext ? 0 :
1378					      htons(ntohl(sa_path_get_dlid(
1379						      pri_path)))));
1380	} else {
1381		/* Work-around until there's a way to obtain remote LID info */
1382		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1383			be16_to_cpu(IB_LID_PERMISSIVE));
1384		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1385			be16_to_cpu(IB_LID_PERMISSIVE));
1386	}
1387	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
1388		be32_to_cpu(pri_path->flow_label));
1389	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
1390	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
1391	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
1392	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
1393	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
1394		(pri_path->hop_limit <= 1));
1395	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
1396		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1397			       pri_path->packet_life_time));
1398
1399	if (alt_path) {
1400		bool alt_ext = false;
1401
1402		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1403			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1404						      alt_path->opa.slid);
1405
1406		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
1407			alt_path->sgid;
1408		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
1409			alt_path->dgid;
1410		if (alt_ext) {
1411			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1412					req_msg)
1413				->global.interface_id =
1414				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1415			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
1416					req_msg)
1417				->global.interface_id =
1418				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1419		}
1420		if (alt_path->hop_limit <= 1) {
1421			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1422				be16_to_cpu(
1423					alt_ext ? 0 :
1424						  htons(ntohl(sa_path_get_slid(
1425							  alt_path)))));
1426			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1427				be16_to_cpu(
1428					alt_ext ? 0 :
1429						  htons(ntohl(sa_path_get_dlid(
1430							  alt_path)))));
1431		} else {
1432			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1433				be16_to_cpu(IB_LID_PERMISSIVE));
1434			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1435				be16_to_cpu(IB_LID_PERMISSIVE));
1436		}
1437		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
1438			be32_to_cpu(alt_path->flow_label));
1439		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
1440		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
1441			alt_path->traffic_class);
1442		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
1443			alt_path->hop_limit);
1444		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
1445		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
1446			(alt_path->hop_limit <= 1));
1447		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
1448			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1449				       alt_path->packet_life_time));
1450	}
1451	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
1452
1453	if (param->private_data && param->private_data_len)
1454		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
1455			    param->private_data_len);
1456}
1457
1458static int cm_validate_req_param(struct ib_cm_req_param *param)
1459{
1460	if (!param->primary_path)
1461		return -EINVAL;
1462
1463	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1464	    param->qp_type != IB_QPT_XRC_INI)
1465		return -EINVAL;
1466
1467	if (param->private_data &&
1468	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1469		return -EINVAL;
1470
1471	if (param->alternate_path &&
1472	    (param->alternate_path->pkey != param->primary_path->pkey ||
1473	     param->alternate_path->mtu != param->primary_path->mtu))
1474		return -EINVAL;
1475
1476	return 0;
1477}
1478
1479int ib_send_cm_req(struct ib_cm_id *cm_id,
1480		   struct ib_cm_req_param *param)
1481{
1482	struct cm_av av = {}, alt_av = {};
1483	struct cm_id_private *cm_id_priv;
1484	struct ib_mad_send_buf *msg;
1485	struct cm_req_msg *req_msg;
1486	unsigned long flags;
1487	int ret;
1488
1489	ret = cm_validate_req_param(param);
1490	if (ret)
1491		return ret;
1492
1493	/* Verify that we're not in timewait. */
1494	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1495	spin_lock_irqsave(&cm_id_priv->lock, flags);
1496	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
1497		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1498		return -EINVAL;
1499	}
1500	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1501
1502	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1503							    id.local_id);
1504	if (IS_ERR(cm_id_priv->timewait_info)) {
1505		ret = PTR_ERR(cm_id_priv->timewait_info);
1506		cm_id_priv->timewait_info = NULL;
1507		return ret;
1508	}
1509
1510	ret = cm_init_av_by_path(param->primary_path,
1511				 param->ppath_sgid_attr, &av);
1512	if (ret)
1513		return ret;
1514	if (param->alternate_path) {
1515		ret = cm_init_av_by_path(param->alternate_path, NULL,
1516					 &alt_av);
1517		if (ret) {
1518			cm_destroy_av(&av);
1519			return ret;
1520		}
1521	}
1522	cm_id->service_id = param->service_id;
1523	cm_id->service_mask = ~cpu_to_be64(0);
1524	cm_id_priv->timeout_ms = cm_convert_to_ms(
1525				    param->primary_path->packet_life_time) * 2 +
1526				 cm_convert_to_ms(
1527				    param->remote_cm_response_timeout);
1528	cm_id_priv->max_cm_retries = param->max_cm_retries;
1529	cm_id_priv->initiator_depth = param->initiator_depth;
1530	cm_id_priv->responder_resources = param->responder_resources;
1531	cm_id_priv->retry_count = param->retry_count;
1532	cm_id_priv->path_mtu = param->primary_path->mtu;
1533	cm_id_priv->pkey = param->primary_path->pkey;
1534	cm_id_priv->qp_type = param->qp_type;
1535
1536	spin_lock_irqsave(&cm_id_priv->lock, flags);
1537
1538	cm_move_av_from_path(&cm_id_priv->av, &av);
1539	if (param->alternate_path)
1540		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
1541
1542	msg = cm_alloc_priv_msg(cm_id_priv);
1543	if (IS_ERR(msg)) {
1544		ret = PTR_ERR(msg);
1545		goto out_unlock;
1546	}
1547
1548	req_msg = (struct cm_req_msg *)msg->mad;
1549	cm_format_req(req_msg, cm_id_priv, param);
1550	cm_id_priv->tid = req_msg->hdr.tid;
1551	msg->timeout_ms = cm_id_priv->timeout_ms;
1552	msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
1553
1554	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
1555	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
1556
1557	trace_icm_send_req(&cm_id_priv->id);
1558	ret = ib_post_send_mad(msg, NULL);
1559	if (ret)
1560		goto out_free;
1561	BUG_ON(cm_id->state != IB_CM_IDLE);
1562	cm_id->state = IB_CM_REQ_SENT;
1563	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1564	return 0;
1565out_free:
1566	cm_free_priv_msg(msg);
1567out_unlock:
1568	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1569	return ret;
1570}
1571EXPORT_SYMBOL(ib_send_cm_req);
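/*
 * Editor's note: a hypothetical active-side caller of ib_send_cm_req().
 * The path record is assumed to have been resolved elsewhere (e.g. by an
 * SA query) and only commonly-set fields are shown:
 *
 *	struct ib_cm_req_param p = {
 *		.primary_path		= &path_rec,
 *		.service_id		= cpu_to_be64(0x1000),
 *		.qp_num			= my_qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.starting_psn		= get_random_u32() & 0xffffff,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *	int ret = ib_send_cm_req(cm_id, &p);
 *
 * On success the ID moves to IB_CM_REQ_SENT and the peer's REP, REJ or
 * timeout is delivered through the cm_handler callback.
 */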
1572
1573static int cm_issue_rej(struct cm_port *port,
1574			struct ib_mad_recv_wc *mad_recv_wc,
1575			enum ib_cm_rej_reason reason,
1576			enum cm_msg_response msg_rejected,
1577			void *ari, u8 ari_length)
1578{
1579	struct ib_mad_send_buf *msg = NULL;
1580	struct cm_rej_msg *rej_msg, *rcv_msg;
1581	int ret;
1582
1583	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1584	if (ret)
1585		return ret;
1586
1587	/* We just need common CM header information.  Cast to any message. */
1588	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1589	rej_msg = (struct cm_rej_msg *) msg->mad;
1590
1591	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1592	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1593		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
1594	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1595		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1596	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
1597	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1598
1599	if (ari && ari_length) {
1600		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1601		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1602	}
1603
1604	trace_icm_issue_rej(
1605		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
1606		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1607	ret = ib_post_send_mad(msg, NULL);
1608	if (ret)
1609		cm_free_response_msg(msg);
1610
1611	return ret;
1612}
1613
1614static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1615{
1616	return ((cpu_to_be16(
1617			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1618		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1619					       req_msg))));
1620}
1621
1622static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
1623				 struct sa_path_rec *path, union ib_gid *gid)
1624{
1625	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1626		path->rec_type = SA_PATH_REC_TYPE_OPA;
1627	else
1628		path->rec_type = SA_PATH_REC_TYPE_IB;
1629}
1630
1631static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1632					struct sa_path_rec *primary_path,
1633					struct sa_path_rec *alt_path)
1634{
1635	u32 lid;
1636
1637	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1638		sa_path_set_dlid(primary_path,
1639				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
1640					 req_msg));
1641		sa_path_set_slid(primary_path,
1642				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1643					 req_msg));
1644	} else {
1645		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1646			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
1647		sa_path_set_dlid(primary_path, lid);
1648
1649		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1650			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
1651		sa_path_set_slid(primary_path, lid);
1652	}
1653
1654	if (!cm_req_has_alt_path(req_msg))
1655		return;
1656
1657	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1658		sa_path_set_dlid(alt_path,
1659				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1660					 req_msg));
1661		sa_path_set_slid(alt_path,
1662				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1663					 req_msg));
1664	} else {
1665		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1666			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
1667		sa_path_set_dlid(alt_path, lid);
1668
1669		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1670			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
1671		sa_path_set_slid(alt_path, lid);
1672	}
1673}
1674
1675static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1676				     struct sa_path_rec *primary_path,
1677				     struct sa_path_rec *alt_path)
1678{
1679	primary_path->dgid =
1680		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1681	primary_path->sgid =
1682		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1683	primary_path->flow_label =
1684		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1685	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1686	primary_path->traffic_class =
1687		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
1688	primary_path->reversible = 1;
1689	primary_path->pkey =
1690		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1691	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
1692	primary_path->mtu_selector = IB_SA_EQ;
1693	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1694	primary_path->rate_selector = IB_SA_EQ;
1695	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
1696	primary_path->packet_life_time_selector = IB_SA_EQ;
1697	primary_path->packet_life_time =
1698		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
1699	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1700	primary_path->service_id =
1701		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1702	if (sa_path_is_roce(primary_path))
1703		primary_path->roce.route_resolved = false;
1704
1705	if (cm_req_has_alt_path(req_msg)) {
1706		alt_path->dgid = *IBA_GET_MEM_PTR(
1707			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1708		alt_path->sgid = *IBA_GET_MEM_PTR(
1709			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1710		alt_path->flow_label = cpu_to_be32(
1711			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1712		alt_path->hop_limit =
1713			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1714		alt_path->traffic_class =
1715			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
1716		alt_path->reversible = 1;
1717		alt_path->pkey =
1718			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1719		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
1720		alt_path->mtu_selector = IB_SA_EQ;
1721		alt_path->mtu =
1722			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1723		alt_path->rate_selector = IB_SA_EQ;
1724		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
1725		alt_path->packet_life_time_selector = IB_SA_EQ;
1726		alt_path->packet_life_time =
1727			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
1728		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1729		alt_path->service_id =
1730			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1731
1732		if (sa_path_is_roce(alt_path))
1733			alt_path->roce.route_resolved = false;
1734	}
1735	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
1736}
1737
1738static u16 cm_get_bth_pkey(struct cm_work *work)
1739{
1740	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1741	u32 port_num = work->port->port_num;
1742	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1743	u16 pkey;
1744	int ret;
1745
1746	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1747	if (ret) {
1748		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
1749				     port_num, pkey_index, ret);
1750		return 0;
1751	}
1752
1753	return pkey;
1754}
1755
1756/**
1757 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
1758 * ULPs (such as IPoIB) do not understand OPA GIDs and will
1759 * reject them as the local_gid will not match the sgid. Therefore,
1760 * change the pathrec's SGID to an IB SGID.
1761 *
1762 * @work: Work completion
1763 * @path: Path record
1764 */
1765static void cm_opa_to_ib_sgid(struct cm_work *work,
1766			      struct sa_path_rec *path)
1767{
1768	struct ib_device *dev = work->port->cm_dev->ib_device;
1769	u32 port_num = work->port->port_num;
1770
1771	if (rdma_cap_opa_ah(dev, port_num) &&
1772	    (ib_is_opa_gid(&path->sgid))) {
1773		union ib_gid sgid;
1774
1775		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1776			dev_warn(&dev->dev,
1777				 "Error updating sgid in CM request\n");
1778			return;
1779		}
1780
1781		path->sgid = sgid;
1782	}
1783}
1784
1785static void cm_format_req_event(struct cm_work *work,
1786				struct cm_id_private *cm_id_priv,
1787				struct ib_cm_id *listen_id)
1788{
1789	struct cm_req_msg *req_msg;
1790	struct ib_cm_req_event_param *param;
1791
1792	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1793	param = &work->cm_event.param.req_rcvd;
1794	param->listen_id = listen_id;
1795	param->bth_pkey = cm_get_bth_pkey(work);
1796	param->port = cm_id_priv->av.port->port_num;
1797	param->primary_path = &work->path[0];
1798	cm_opa_to_ib_sgid(work, param->primary_path);
1799	if (cm_req_has_alt_path(req_msg)) {
1800		param->alternate_path = &work->path[1];
1801		cm_opa_to_ib_sgid(work, param->alternate_path);
1802	} else {
1803		param->alternate_path = NULL;
1804	}
1805	param->remote_ca_guid =
1806		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1807	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1808	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1809	param->qp_type = cm_req_get_qp_type(req_msg);
1810	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1811	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1812	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
1813	param->local_cm_response_timeout =
1814		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1815	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
1816	param->remote_cm_response_timeout =
1817		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1818	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1819	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1820	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
1821	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1822	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1823	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1824
1825	work->cm_event.private_data =
1826		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
1827}
1828
1829static void cm_process_work(struct cm_id_private *cm_id_priv,
1830			    struct cm_work *work)
1831{
1832	int ret;
1833
1834	/* We will typically only have the current event to report. */
1835	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1836	cm_free_work(work);
1837
1838	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1839		spin_lock_irq(&cm_id_priv->lock);
1840		work = cm_dequeue_work(cm_id_priv);
1841		spin_unlock_irq(&cm_id_priv->lock);
1842		if (!work)
1843			return;
1844
1845		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1846						&work->cm_event);
1847		cm_free_work(work);
1848	}
1849	cm_deref_id(cm_id_priv);
1850	if (ret)
1851		cm_destroy_id(&cm_id_priv->id, ret);
1852}
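/*
 * Editor's note: cm_process_work() shows the handler's return-value
 * contract: a non-zero return makes the CM destroy the ID on the
 * consumer's behalf.  A hypothetical handler relying on that:
 *
 *	static int my_handler(struct ib_cm_id *id,
 *			      const struct ib_cm_event *ev)
 *	{
 *		if (ev->event == IB_CM_REJ_RECEIVED)
 *			return -ECONNREFUSED;	// cm_destroy_id() runs for us
 *		return 0;			// keep the ID alive
 *	}
 *
 * A handler should therefore not both call ib_destroy_cm_id() on its own
 * ID and return non-zero, since that would destroy the ID twice.
 */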
1853
1854static void cm_format_mra(struct cm_mra_msg *mra_msg,
1855			  struct cm_id_private *cm_id_priv,
1856			  enum cm_msg_response msg_mraed, u8 service_timeout,
1857			  const void *private_data, u8 private_data_len)
1858{
1859	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1860	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1861	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1862		be32_to_cpu(cm_id_priv->id.local_id));
1863	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1864		be32_to_cpu(cm_id_priv->id.remote_id));
1865	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
1866
1867	if (private_data && private_data_len)
1868		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1869			    private_data_len);
1870}
1871
1872static void cm_format_rej(struct cm_rej_msg *rej_msg,
1873			  struct cm_id_private *cm_id_priv,
1874			  enum ib_cm_rej_reason reason, void *ari,
1875			  u8 ari_length, const void *private_data,
1876			  u8 private_data_len, enum ib_cm_state state)
1877{
1878	lockdep_assert_held(&cm_id_priv->lock);
1879
1880	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1881	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1882		be32_to_cpu(cm_id_priv->id.remote_id));
1883
1884	switch (state) {
1885	case IB_CM_REQ_RCVD:
1886		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
1887		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1888		break;
1889	case IB_CM_MRA_REQ_SENT:
1890		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1891			be32_to_cpu(cm_id_priv->id.local_id));
1892		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1893		break;
1894	case IB_CM_REP_RCVD:
1895	case IB_CM_MRA_REP_SENT:
1896		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1897			be32_to_cpu(cm_id_priv->id.local_id));
1898		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
1899		break;
1900	default:
1901		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1902			be32_to_cpu(cm_id_priv->id.local_id));
1903		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1904			CM_MSG_RESPONSE_OTHER);
1905		break;
1906	}
1907
1908	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1909	if (ari && ari_length) {
1910		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1911		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1912	}
1913
1914	if (private_data && private_data_len)
1915		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1916			    private_data_len);
1917}
1918
1919static void cm_dup_req_handler(struct cm_work *work,
1920			       struct cm_id_private *cm_id_priv)
1921{
1922	struct ib_mad_send_buf *msg = NULL;
1923	int ret;
1924
1925	atomic_long_inc(
1926		&work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
1927
1928	/* Quick state check to discard duplicate REQs. */
1929	spin_lock_irq(&cm_id_priv->lock);
1930	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1931		spin_unlock_irq(&cm_id_priv->lock);
1932		return;
1933	}
1934	spin_unlock_irq(&cm_id_priv->lock);
1935
1936	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1937	if (ret)
1938		return;
1939
1940	spin_lock_irq(&cm_id_priv->lock);
1941	switch (cm_id_priv->id.state) {
1942	case IB_CM_MRA_REQ_SENT:
1943		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1944			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1945			      cm_id_priv->private_data,
1946			      cm_id_priv->private_data_len);
1947		break;
1948	case IB_CM_TIMEWAIT:
1949		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1950			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1951			      IB_CM_TIMEWAIT);
1952		break;
1953	default:
1954		goto unlock;
1955	}
1956	spin_unlock_irq(&cm_id_priv->lock);
1957
1958	trace_icm_send_dup_req(&cm_id_priv->id);
1959	ret = ib_post_send_mad(msg, NULL);
1960	if (ret)
1961		goto free;
1962	return;
1963
1964unlock:	spin_unlock_irq(&cm_id_priv->lock);
1965free:	cm_free_response_msg(msg);
1966}
1967
1968static struct cm_id_private *cm_match_req(struct cm_work *work,
1969					  struct cm_id_private *cm_id_priv)
1970{
1971	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1972	struct cm_timewait_info *timewait_info;
1973	struct cm_req_msg *req_msg;
1974
1975	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1976
1977	/* Check for possible duplicate REQ. */
1978	spin_lock_irq(&cm.lock);
1979	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1980	if (timewait_info) {
1981		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1982					   timewait_info->work.remote_id);
1983		spin_unlock_irq(&cm.lock);
1984		if (cur_cm_id_priv) {
1985			cm_dup_req_handler(work, cur_cm_id_priv);
1986			cm_deref_id(cur_cm_id_priv);
1987		}
1988		return NULL;
1989	}
1990
1991	/* Check for stale connections. */
1992	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1993	if (timewait_info) {
1994		cm_remove_remote(cm_id_priv);
1995		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1996					   timewait_info->work.remote_id);
1997
1998		spin_unlock_irq(&cm.lock);
1999		cm_issue_rej(work->port, work->mad_recv_wc,
2000			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
2001			     NULL, 0);
2002		if (cur_cm_id_priv) {
2003			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2004			cm_deref_id(cur_cm_id_priv);
2005		}
2006		return NULL;
2007	}
2008
2009	/* Find matching listen request. */
2010	listen_cm_id_priv = cm_find_listen(
2011		cm_id_priv->id.device,
2012		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2013	if (!listen_cm_id_priv) {
2014		cm_remove_remote(cm_id_priv);
2015		spin_unlock_irq(&cm.lock);
2016		cm_issue_rej(work->port, work->mad_recv_wc,
2017			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2018			     NULL, 0);
2019		return NULL;
2020	}
2021	spin_unlock_irq(&cm.lock);
2022	return listen_cm_id_priv;
2023}
2024
2025/*
2026 * Work-around for inter-subnet connections.  If the LIDs are permissive,
2027 * we need to override the LID/SL data in the REQ with the LID information
2028 * in the work completion.
2029 */
2030static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2031{
2032	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2033		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2034					req_msg)) == IB_LID_PERMISSIVE) {
2035			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2036				be16_to_cpu(ib_lid_be16(wc->slid)));
2037			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2038		}
2039
2040		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2041					req_msg)) == IB_LID_PERMISSIVE)
2042			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2043				wc->dlid_path_bits);
2044	}
2045
2046	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2047		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2048					req_msg)) == IB_LID_PERMISSIVE) {
2049			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2050				be16_to_cpu(ib_lid_be16(wc->slid)));
2051			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2052		}
2053
2054		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2055					req_msg)) == IB_LID_PERMISSIVE)
2056			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2057				wc->dlid_path_bits);
2058	}
2059}
2060
2061static int cm_req_handler(struct cm_work *work)
2062{
2063	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2064	struct cm_req_msg *req_msg;
2065	const struct ib_global_route *grh;
2066	const struct ib_gid_attr *gid_attr;
2067	int ret;
2068
2069	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2070
2071	cm_id_priv =
2072		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2073	if (IS_ERR(cm_id_priv))
2074		return PTR_ERR(cm_id_priv);
2075
2076	cm_id_priv->id.remote_id =
2077		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2078	cm_id_priv->id.service_id =
2079		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2080	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
2081	cm_id_priv->tid = req_msg->hdr.tid;
2082	cm_id_priv->timeout_ms = cm_convert_to_ms(
2083		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2084	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2085	cm_id_priv->remote_qpn =
2086		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2087	cm_id_priv->initiator_depth =
2088		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2089	cm_id_priv->responder_resources =
2090		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2091	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2092	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2093	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2094	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2095	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2096	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2097
2098	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2099				      work->mad_recv_wc->recv_buf.grh,
2100				      &cm_id_priv->av);
2101	if (ret)
2102		goto destroy;
2103	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
2104							    id.local_id);
2105	if (IS_ERR(cm_id_priv->timewait_info)) {
2106		ret = PTR_ERR(cm_id_priv->timewait_info);
2107		cm_id_priv->timewait_info = NULL;
2108		goto destroy;
2109	}
2110	cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2111	cm_id_priv->timewait_info->remote_ca_guid =
2112		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2113	cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2114
2115	/*
2116	 * Note that the ID pointer is not in the xarray at this point,
2117	 * so this set is only visible to the local thread.
2118	 */
2119	cm_id_priv->id.state = IB_CM_REQ_RCVD;
2120
2121	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2122	if (!listen_cm_id_priv) {
2123		trace_icm_no_listener_err(&cm_id_priv->id);
2124		cm_id_priv->id.state = IB_CM_IDLE;
2125		ret = -EINVAL;
2126		goto destroy;
2127	}
2128
2129	memset(&work->path[0], 0, sizeof(work->path[0]));
2130	if (cm_req_has_alt_path(req_msg))
2131		memset(&work->path[1], 0, sizeof(work->path[1]));
2132	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2133	gid_attr = grh->sgid_attr;
2134
2135	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
2136		work->path[0].rec_type =
2137			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2138	} else {
2139		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2140		cm_path_set_rec_type(
2141			work->port->cm_dev->ib_device, work->port->port_num,
2142			&work->path[0],
2143			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2144					req_msg));
2145	}
2146	if (cm_req_has_alt_path(req_msg))
2147		work->path[1].rec_type = work->path[0].rec_type;
2148	cm_format_paths_from_req(req_msg, &work->path[0],
2149				 &work->path[1]);
2150	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2151		sa_path_set_dmac(&work->path[0],
2152				 cm_id_priv->av.ah_attr.roce.dmac);
2153	work->path[0].hop_limit = grh->hop_limit;
2154
2155	/* This destroy call is needed to pair with cm_init_av_for_response */
2156	cm_destroy_av(&cm_id_priv->av);
2157	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
2158	if (ret) {
2159		int err;
2160
2161		err = rdma_query_gid(work->port->cm_dev->ib_device,
2162				     work->port->port_num, 0,
2163				     &work->path[0].sgid);
2164		if (err)
2165			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2166				       NULL, 0, NULL, 0);
2167		else
2168			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2169				       &work->path[0].sgid,
2170				       sizeof(work->path[0].sgid),
2171				       NULL, 0);
2172		goto rejected;
2173	}
2174	if (cm_req_has_alt_path(req_msg)) {
2175		ret = cm_init_av_by_path(&work->path[1], NULL,
2176					 &cm_id_priv->alt_av);
2177		if (ret) {
2178			ib_send_cm_rej(&cm_id_priv->id,
2179				       IB_CM_REJ_INVALID_ALT_GID,
2180				       &work->path[0].sgid,
2181				       sizeof(work->path[0].sgid), NULL, 0);
2182			goto rejected;
2183		}
2184	}
2185
2186	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2187	cm_id_priv->id.context = listen_cm_id_priv->id.context;
2188	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2189
2190	/* Now MAD handlers can see the new ID */
2191	spin_lock_irq(&cm_id_priv->lock);
2192	cm_finalize_id(cm_id_priv);
2193
2194	/* Refcount belongs to the event, pairs with cm_process_work() */
2195	refcount_inc(&cm_id_priv->refcount);
2196	cm_queue_work_unlock(cm_id_priv, work);
2197	/*
2198	 * Since this ID was just created and was not made visible to other MAD
 2199	 * handlers until the cm_finalize_id() above, we know that
 2200	 * cm_process_work() will deliver the event and the listen_cm_id
2201	 * embedded in the event can be derefed here.
2202	 */
2203	cm_deref_id(listen_cm_id_priv);
2204	return 0;
2205
2206rejected:
2207	cm_deref_id(listen_cm_id_priv);
2208destroy:
2209	ib_destroy_cm_id(&cm_id_priv->id);
2210	return ret;
2211}
2212
2213static void cm_format_rep(struct cm_rep_msg *rep_msg,
2214			  struct cm_id_private *cm_id_priv,
2215			  struct ib_cm_rep_param *param)
2216{
2217	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2218			      param->ece.attr_mod);
2219	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2220		be32_to_cpu(cm_id_priv->id.local_id));
2221	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2222		be32_to_cpu(cm_id_priv->id.remote_id));
2223	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2224	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2225		param->responder_resources);
2226	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2227		cm_id_priv->av.port->cm_dev->ack_delay);
2228	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2229	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2230	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2231		be64_to_cpu(cm_id_priv->id.device->node_guid));
2232
2233	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2234		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2235			param->initiator_depth);
2236		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2237			param->flow_control);
2238		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2239		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2240	} else {
2241		IBA_SET(CM_REP_SRQ, rep_msg, 1);
2242		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2243	}
2244
2245	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2246	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2247	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2248
2249	if (param->private_data && param->private_data_len)
2250		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2251			    param->private_data_len);
2252}
2253
2254int ib_send_cm_rep(struct ib_cm_id *cm_id,
2255		   struct ib_cm_rep_param *param)
2256{
2257	struct cm_id_private *cm_id_priv;
2258	struct ib_mad_send_buf *msg;
2259	struct cm_rep_msg *rep_msg;
2260	unsigned long flags;
2261	int ret;
2262
2263	if (param->private_data &&
2264	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2265		return -EINVAL;
2266
2267	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2268	spin_lock_irqsave(&cm_id_priv->lock, flags);
2269	if (cm_id->state != IB_CM_REQ_RCVD &&
2270	    cm_id->state != IB_CM_MRA_REQ_SENT) {
2271		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
2272		ret = -EINVAL;
2273		goto out;
2274	}
2275
2276	msg = cm_alloc_priv_msg(cm_id_priv);
2277	if (IS_ERR(msg)) {
2278		ret = PTR_ERR(msg);
2279		goto out;
2280	}
2281
2282	rep_msg = (struct cm_rep_msg *) msg->mad;
2283	cm_format_rep(rep_msg, cm_id_priv, param);
2284	msg->timeout_ms = cm_id_priv->timeout_ms;
2285	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2286
2287	trace_icm_send_rep(cm_id);
2288	ret = ib_post_send_mad(msg, NULL);
2289	if (ret)
2290		goto out_free;
2291
2292	cm_id->state = IB_CM_REP_SENT;
2293	cm_id_priv->initiator_depth = param->initiator_depth;
2294	cm_id_priv->responder_resources = param->responder_resources;
2295	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2296	WARN_ONCE(param->qp_num & 0xFF000000,
2297		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2298		  param->qp_num);
2299	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2300	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2301	return 0;
2302
2303out_free:
2304	cm_free_priv_msg(msg);
2305out:
2306	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2307	return ret;
2308}
2309EXPORT_SYMBOL(ib_send_cm_rep);
2310
2311static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2312			  struct cm_id_private *cm_id_priv,
2313			  const void *private_data,
2314			  u8 private_data_len)
2315{
2316	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2317	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2318		be32_to_cpu(cm_id_priv->id.local_id));
2319	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2320		be32_to_cpu(cm_id_priv->id.remote_id));
2321
2322	if (private_data && private_data_len)
2323		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2324			    private_data_len);
2325}
2326
2327int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2328		   const void *private_data,
2329		   u8 private_data_len)
2330{
2331	struct cm_id_private *cm_id_priv;
2332	struct ib_mad_send_buf *msg;
2333	unsigned long flags;
2334	void *data;
2335	int ret;
2336
2337	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2338		return -EINVAL;
2339
2340	data = cm_copy_private_data(private_data, private_data_len);
2341	if (IS_ERR(data))
2342		return PTR_ERR(data);
2343
2344	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2345	spin_lock_irqsave(&cm_id_priv->lock, flags);
2346	if (cm_id->state != IB_CM_REP_RCVD &&
2347	    cm_id->state != IB_CM_MRA_REP_SENT) {
2348		trace_icm_send_cm_rtu_err(cm_id);
2349		ret = -EINVAL;
2350		goto error;
2351	}
2352
2353	msg = cm_alloc_msg(cm_id_priv);
2354	if (IS_ERR(msg)) {
2355		ret = PTR_ERR(msg);
2356		goto error;
2357	}
2358
2359	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2360		      private_data, private_data_len);
2361
2362	trace_icm_send_rtu(cm_id);
2363	ret = ib_post_send_mad(msg, NULL);
2364	if (ret) {
2365		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2366		cm_free_msg(msg);
2367		kfree(data);
2368		return ret;
2369	}
2370
2371	cm_id->state = IB_CM_ESTABLISHED;
2372	cm_set_private_data(cm_id_priv, data, private_data_len);
2373	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2374	return 0;
2375
2376error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2377	kfree(data);
2378	return ret;
2379}
2380EXPORT_SYMBOL(ib_send_cm_rtu);
2381
2382static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2383{
2384	struct cm_rep_msg *rep_msg;
2385	struct ib_cm_rep_event_param *param;
2386
2387	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2388	param = &work->cm_event.param.rep_rcvd;
2389	param->remote_ca_guid =
2390		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2391	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2392	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2393	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
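	/*
	 * The fields are deliberately crossed below: the peer's initiator
	 * depth bounds our responder resources, and its responder resources
	 * bound our initiator depth.
	 */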
2394	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2395	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2396	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2397	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2398	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2399	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2400	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2401	param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2402	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2403	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2404	param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2405
2406	work->cm_event.private_data =
2407		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2408}
2409
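/*
 * A duplicate REP means the peer never saw our reply.  Re-send the RTU
 * if the connection is already established, or an MRA if we are still
 * in IB_CM_MRA_REP_SENT; REPs seen in any other state are dropped.
 */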
2410static void cm_dup_rep_handler(struct cm_work *work)
2411{
2412	struct cm_id_private *cm_id_priv;
2413	struct cm_rep_msg *rep_msg;
2414	struct ib_mad_send_buf *msg = NULL;
2415	int ret;
2416
2417	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2418	cm_id_priv = cm_acquire_id(
2419		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2420		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2421	if (!cm_id_priv)
2422		return;
2423
2424	atomic_long_inc(
2425		&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
2426	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2427	if (ret)
2428		goto deref;
2429
2430	spin_lock_irq(&cm_id_priv->lock);
2431	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2432		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2433			      cm_id_priv->private_data,
2434			      cm_id_priv->private_data_len);
2435	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2436		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2437			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2438			      cm_id_priv->private_data,
2439			      cm_id_priv->private_data_len);
2440	else
2441		goto unlock;
2442	spin_unlock_irq(&cm_id_priv->lock);
2443
2444	trace_icm_send_dup_rep(&cm_id_priv->id);
2445	ret = ib_post_send_mad(msg, NULL);
2446	if (ret)
2447		goto free;
2448	goto deref;
2449
2450unlock:	spin_unlock_irq(&cm_id_priv->lock);
2451free:	cm_free_response_msg(msg);
2452deref:	cm_deref_id(cm_id_priv);
2453}
2454
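/*
 * Process a newly received REP.  The remote ID and QPN are recorded in
 * the timewait rbtrees so that duplicate REPs and stale connections can
 * be detected; a stale connection is rejected with IB_CM_REJ_STALE_CONN
 * and the old connection is torn down with a DREQ.  As in
 * cm_format_rep_event(), the peer's initiator depth and responder
 * resources are crossed into our responder resources and initiator
 * depth.
 */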
2455static int cm_rep_handler(struct cm_work *work)
2456{
2457	struct cm_id_private *cm_id_priv;
2458	struct cm_rep_msg *rep_msg;
2459	int ret;
2460	struct cm_id_private *cur_cm_id_priv;
2461	struct cm_timewait_info *timewait_info;
2462
2463	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2464	cm_id_priv = cm_acquire_id(
2465		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2466	if (!cm_id_priv) {
2467		cm_dup_rep_handler(work);
2468		trace_icm_remote_no_priv_err(
2469			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2470		return -EINVAL;
2471	}
2472
2473	cm_format_rep_event(work, cm_id_priv->qp_type);
2474
2475	spin_lock_irq(&cm_id_priv->lock);
2476	switch (cm_id_priv->id.state) {
2477	case IB_CM_REQ_SENT:
2478	case IB_CM_MRA_REQ_RCVD:
2479		break;
2480	default:
2481		ret = -EINVAL;
2482		trace_icm_rep_unknown_err(
2483			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2484			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2485			cm_id_priv->id.state);
2486		spin_unlock_irq(&cm_id_priv->lock);
2487		goto error;
2488	}
2489
2490	cm_id_priv->timewait_info->work.remote_id =
2491		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2492	cm_id_priv->timewait_info->remote_ca_guid =
2493		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2494	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2495
2496	spin_lock(&cm.lock);
2497	/* Check for duplicate REP. */
2498	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2499		spin_unlock(&cm.lock);
2500		spin_unlock_irq(&cm_id_priv->lock);
2501		ret = -EINVAL;
2502		trace_icm_insert_failed_err(
2503			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2504		goto error;
2505	}
2506	/* Check for a stale connection. */
2507	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2508	if (timewait_info) {
2509		cm_remove_remote(cm_id_priv);
2510		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2511					   timewait_info->work.remote_id);
2512
2513		spin_unlock(&cm.lock);
2514		spin_unlock_irq(&cm_id_priv->lock);
2515		cm_issue_rej(work->port, work->mad_recv_wc,
2516			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2517			     NULL, 0);
2518		ret = -EINVAL;
2519		trace_icm_staleconn_err(
2520			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2521			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2522
2523		if (cur_cm_id_priv) {
2524			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2525			cm_deref_id(cur_cm_id_priv);
2526		}
2527
2528		goto error;
2529	}
2530	spin_unlock(&cm.lock);
2531
2532	cm_id_priv->id.state = IB_CM_REP_RCVD;
2533	cm_id_priv->id.remote_id =
2534		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2535	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2536	cm_id_priv->initiator_depth =
2537		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2538	cm_id_priv->responder_resources =
2539		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2540	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2541	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2542	cm_id_priv->target_ack_delay =
2543		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2544	cm_id_priv->av.timeout =
2545			cm_ack_timeout(cm_id_priv->target_ack_delay,
2546				       cm_id_priv->av.timeout - 1);
2547	cm_id_priv->alt_av.timeout =
2548			cm_ack_timeout(cm_id_priv->target_ack_delay,
2549				       cm_id_priv->alt_av.timeout - 1);
2550
2551	ib_cancel_mad(cm_id_priv->msg);
2552	cm_queue_work_unlock(cm_id_priv, work);
2553	return 0;
2554
2555error:
2556	cm_deref_id(cm_id_priv);
2557	return ret;
2558}
2559
2560static int cm_establish_handler(struct cm_work *work)
2561{
2562	struct cm_id_private *cm_id_priv;
2563
2564	/* See comment in cm_establish about lookup. */
2565	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2566	if (!cm_id_priv)
2567		return -EINVAL;
2568
2569	spin_lock_irq(&cm_id_priv->lock);
2570	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2571		spin_unlock_irq(&cm_id_priv->lock);
2572		goto out;
2573	}
2574
2575	ib_cancel_mad(cm_id_priv->msg);
2576	cm_queue_work_unlock(cm_id_priv, work);
2577	return 0;
2578out:
2579	cm_deref_id(cm_id_priv);
2580	return -EINVAL;
2581}
2582
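/*
 * An RTU completes the three-way handshake: it is valid only in
 * IB_CM_REP_SENT or IB_CM_MRA_REP_RCVD, after which the ID becomes
 * IB_CM_ESTABLISHED.  RTUs seen in any other state are counted as
 * duplicates and dropped.
 */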
2583static int cm_rtu_handler(struct cm_work *work)
2584{
2585	struct cm_id_private *cm_id_priv;
2586	struct cm_rtu_msg *rtu_msg;
2587
2588	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2589	cm_id_priv = cm_acquire_id(
2590		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2591		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2592	if (!cm_id_priv)
2593		return -EINVAL;
2594
2595	work->cm_event.private_data =
2596		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2597
2598	spin_lock_irq(&cm_id_priv->lock);
2599	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2600	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2601		spin_unlock_irq(&cm_id_priv->lock);
2602		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2603						     [CM_RTU_COUNTER]);
2604		goto out;
2605	}
2606	cm_id_priv->id.state = IB_CM_ESTABLISHED;
2607
2608	ib_cancel_mad(cm_id_priv->msg);
2609	cm_queue_work_unlock(cm_id_priv, work);
2610	return 0;
2611out:
2612	cm_deref_id(cm_id_priv);
2613	return -EINVAL;
2614}
2615
2616static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2617			  struct cm_id_private *cm_id_priv,
2618			  const void *private_data,
2619			  u8 private_data_len)
2620{
2621	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2622			  cm_form_tid(cm_id_priv));
2623	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2624		be32_to_cpu(cm_id_priv->id.local_id));
2625	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2626		be32_to_cpu(cm_id_priv->id.remote_id));
2627	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2628		be32_to_cpu(cm_id_priv->remote_qpn));
2629
2630	if (private_data && private_data_len)
2631		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2632			    private_data_len);
2633}
2634
2635static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2636			       const void *private_data, u8 private_data_len)
2637{
2638	struct ib_mad_send_buf *msg;
2639	int ret;
2640
2641	lockdep_assert_held(&cm_id_priv->lock);
2642
2643	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2644		return -EINVAL;
2645
2646	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2647		trace_icm_dreq_skipped(&cm_id_priv->id);
2648		return -EINVAL;
2649	}
2650
2651	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2652	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2653		ib_cancel_mad(cm_id_priv->msg);
2654
2655	msg = cm_alloc_priv_msg(cm_id_priv);
2656	if (IS_ERR(msg)) {
2657		cm_enter_timewait(cm_id_priv);
2658		return PTR_ERR(msg);
2659	}
2660
2661	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2662		       private_data, private_data_len);
2663	msg->timeout_ms = cm_id_priv->timeout_ms;
2664	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2665
2666	trace_icm_send_dreq(&cm_id_priv->id);
2667	ret = ib_post_send_mad(msg, NULL);
2668	if (ret) {
2669		cm_enter_timewait(cm_id_priv);
2670		cm_free_priv_msg(msg);
2671		return ret;
2672	}
2673
2674	cm_id_priv->id.state = IB_CM_DREQ_SENT;
2675	return 0;
2676}
2677
2678int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2679		    u8 private_data_len)
2680{
2681	struct cm_id_private *cm_id_priv =
2682		container_of(cm_id, struct cm_id_private, id);
2683	unsigned long flags;
2684	int ret;
2685
2686	spin_lock_irqsave(&cm_id_priv->lock, flags);
2687	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2688	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2689	return ret;
2690}
2691EXPORT_SYMBOL(ib_send_cm_dreq);
2692
2693static void cm_format_drep(struct cm_drep_msg *drep_msg,
2694			  struct cm_id_private *cm_id_priv,
2695			  const void *private_data,
2696			  u8 private_data_len)
2697{
2698	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2699	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2700		be32_to_cpu(cm_id_priv->id.local_id));
2701	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2702		be32_to_cpu(cm_id_priv->id.remote_id));
2703
2704	if (private_data && private_data_len)
2705		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2706			    private_data_len);
2707}
2708
2709static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2710			       void *private_data, u8 private_data_len)
2711{
2712	struct ib_mad_send_buf *msg;
2713	int ret;
2714
2715	lockdep_assert_held(&cm_id_priv->lock);
2716
2717	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2718		return -EINVAL;
2719
2720	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2721		trace_icm_send_drep_err(&cm_id_priv->id);
2722		kfree(private_data);
2723		return -EINVAL;
2724	}
2725
2726	cm_set_private_data(cm_id_priv, private_data, private_data_len);
2727	cm_enter_timewait(cm_id_priv);
2728
2729	msg = cm_alloc_msg(cm_id_priv);
2730	if (IS_ERR(msg))
2731		return PTR_ERR(msg);
2732
2733	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2734		       private_data, private_data_len);
2735
2736	trace_icm_send_drep(&cm_id_priv->id);
2737	ret = ib_post_send_mad(msg, NULL);
2738	if (ret) {
2739		cm_free_msg(msg);
2740		return ret;
2741	}
2742	return 0;
2743}
2744
2745int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2746		    u8 private_data_len)
2747{
2748	struct cm_id_private *cm_id_priv =
2749		container_of(cm_id, struct cm_id_private, id);
2750	unsigned long flags;
2751	void *data;
2752	int ret;
2753
2754	data = cm_copy_private_data(private_data, private_data_len);
2755	if (IS_ERR(data))
2756		return PTR_ERR(data);
2757
2758	spin_lock_irqsave(&cm_id_priv->lock, flags);
2759	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2760	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2761	return ret;
2762}
2763EXPORT_SYMBOL(ib_send_cm_drep);
2764
2765static int cm_issue_drep(struct cm_port *port,
2766			 struct ib_mad_recv_wc *mad_recv_wc)
2767{
2768	struct ib_mad_send_buf *msg = NULL;
2769	struct cm_dreq_msg *dreq_msg;
2770	struct cm_drep_msg *drep_msg;
2771	int ret;
2772
2773	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2774	if (ret)
2775		return ret;
2776
2777	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2778	drep_msg = (struct cm_drep_msg *) msg->mad;
2779
2780	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2781	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2782		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2783	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2784		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2785
2786	trace_icm_issue_drep(
2787		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2788		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2789	ret = ib_post_send_mad(msg, NULL);
2790	if (ret)
2791		cm_free_response_msg(msg);
2792
2793	return ret;
2794}
2795
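/*
 * Handle a received DREQ.  A DREQ for an unknown ID gets an immediate
 * DREP via cm_issue_drep() so the peer can finish its teardown.  A DREQ
 * that races with our own outstanding REP/DREQ cancels that MAD, and a
 * DREQ received in timewait is answered with a fresh DREP since the
 * original reply was evidently lost.
 */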
2796static int cm_dreq_handler(struct cm_work *work)
2797{
2798	struct cm_id_private *cm_id_priv;
2799	struct cm_dreq_msg *dreq_msg;
2800	struct ib_mad_send_buf *msg = NULL;
2801
2802	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2803	cm_id_priv = cm_acquire_id(
2804		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2805		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2806	if (!cm_id_priv) {
2807		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2808						     [CM_DREQ_COUNTER]);
2809		cm_issue_drep(work->port, work->mad_recv_wc);
2810		trace_icm_no_priv_err(
2811			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2812			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2813		return -EINVAL;
2814	}
2815
2816	work->cm_event.private_data =
2817		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2818
2819	spin_lock_irq(&cm_id_priv->lock);
2820	if (cm_id_priv->local_qpn !=
2821	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2822		goto unlock;
2823
2824	switch (cm_id_priv->id.state) {
2825	case IB_CM_REP_SENT:
2826	case IB_CM_DREQ_SENT:
2827		ib_cancel_mad(cm_id_priv->msg);
2828		break;
2829	case IB_CM_ESTABLISHED:
2830		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2831		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2832			ib_cancel_mad(cm_id_priv->msg);
2833		break;
2834	case IB_CM_MRA_REP_RCVD:
2835		break;
2836	case IB_CM_TIMEWAIT:
2837		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2838						     [CM_DREQ_COUNTER]);
2839		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2840		if (IS_ERR(msg))
2841			goto unlock;
2842
2843		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2844			       cm_id_priv->private_data,
2845			       cm_id_priv->private_data_len);
2846		spin_unlock_irq(&cm_id_priv->lock);
2847
2848		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2849		    ib_post_send_mad(msg, NULL))
2850			cm_free_response_msg(msg);
2851		goto deref;
2852	case IB_CM_DREQ_RCVD:
2853		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2854						     [CM_DREQ_COUNTER]);
2855		goto unlock;
2856	default:
2857		trace_icm_dreq_unknown_err(&cm_id_priv->id);
2858		goto unlock;
2859	}
2860	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2861	cm_id_priv->tid = dreq_msg->hdr.tid;
2862	cm_queue_work_unlock(cm_id_priv, work);
2863	return 0;
2864
2865unlock:	spin_unlock_irq(&cm_id_priv->lock);
2866deref:	cm_deref_id(cm_id_priv);
2867	return -EINVAL;
2868}
2869
2870static int cm_drep_handler(struct cm_work *work)
2871{
2872	struct cm_id_private *cm_id_priv;
2873	struct cm_drep_msg *drep_msg;
2874
2875	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2876	cm_id_priv = cm_acquire_id(
2877		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2878		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2879	if (!cm_id_priv)
2880		return -EINVAL;
2881
2882	work->cm_event.private_data =
2883		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2884
2885	spin_lock_irq(&cm_id_priv->lock);
2886	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2887	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2888		spin_unlock_irq(&cm_id_priv->lock);
2889		goto out;
2890	}
2891	cm_enter_timewait(cm_id_priv);
2892
2893	ib_cancel_mad(cm_id_priv->msg);
2894	cm_queue_work_unlock(cm_id_priv, work);
2895	return 0;
2896out:
2897	cm_deref_id(cm_id_priv);
2898	return -EINVAL;
2899}
2900
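/*
 * Send a REJ for the current connection attempt.  Before our REP has
 * been sent the ID can simply be reset to idle; once the REP is on the
 * wire the ID must pass through timewait instead, since the peer may
 * already consider the connection established.
 */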
2901static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2902			      enum ib_cm_rej_reason reason, void *ari,
2903			      u8 ari_length, const void *private_data,
2904			      u8 private_data_len)
2905{
2906	enum ib_cm_state state = cm_id_priv->id.state;
2907	struct ib_mad_send_buf *msg;
2908	int ret;
2909
2910	lockdep_assert_held(&cm_id_priv->lock);
2911
2912	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2913	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2914		return -EINVAL;
2915
2916	switch (state) {
2917	case IB_CM_REQ_SENT:
2918	case IB_CM_MRA_REQ_RCVD:
2919	case IB_CM_REQ_RCVD:
2920	case IB_CM_MRA_REQ_SENT:
2921	case IB_CM_REP_RCVD:
2922	case IB_CM_MRA_REP_SENT:
2923		cm_reset_to_idle(cm_id_priv);
2924		msg = cm_alloc_msg(cm_id_priv);
2925		if (IS_ERR(msg))
2926			return PTR_ERR(msg);
2927		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2928			      ari, ari_length, private_data, private_data_len,
2929			      state);
2930		break;
2931	case IB_CM_REP_SENT:
2932	case IB_CM_MRA_REP_RCVD:
2933		cm_enter_timewait(cm_id_priv);
2934		msg = cm_alloc_msg(cm_id_priv);
2935		if (IS_ERR(msg))
2936			return PTR_ERR(msg);
2937		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2938			      ari, ari_length, private_data, private_data_len,
2939			      state);
2940		break;
2941	default:
2942		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2943		return -EINVAL;
2944	}
2945
2946	trace_icm_send_rej(&cm_id_priv->id, reason);
2947	ret = ib_post_send_mad(msg, NULL);
2948	if (ret) {
2949		cm_free_msg(msg);
2950		return ret;
2951	}
2952
2953	return 0;
2954}
2955
2956int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2957		   void *ari, u8 ari_length, const void *private_data,
2958		   u8 private_data_len)
2959{
2960	struct cm_id_private *cm_id_priv =
2961		container_of(cm_id, struct cm_id_private, id);
2962	unsigned long flags;
2963	int ret;
2964
2965	spin_lock_irqsave(&cm_id_priv->lock, flags);
2966	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2967				 private_data, private_data_len);
2968	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2969	return ret;
2970}
2971EXPORT_SYMBOL(ib_send_cm_rej);
2972
2973static void cm_format_rej_event(struct cm_work *work)
2974{
2975	struct cm_rej_msg *rej_msg;
2976	struct ib_cm_rej_event_param *param;
2977
2978	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2979	param = &work->cm_event.param.rej_rcvd;
2980	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2981	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2982	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2983	work->cm_event.private_data =
2984		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2985}
2986
2987static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2988{
2989	struct cm_id_private *cm_id_priv;
2990	__be32 remote_id;
2991
2992	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
2993
2994	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
2995		cm_id_priv = cm_find_remote_id(
2996			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
2997			remote_id);
2998	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
2999		   CM_MSG_RESPONSE_REQ)
3000		cm_id_priv = cm_acquire_id(
3001			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3002			0);
3003	else
3004		cm_id_priv = cm_acquire_id(
3005			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3006			remote_id);
3007
3008	return cm_id_priv;
3009}
3010
3011static int cm_rej_handler(struct cm_work *work)
3012{
3013	struct cm_id_private *cm_id_priv;
3014	struct cm_rej_msg *rej_msg;
3015
3016	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3017	cm_id_priv = cm_acquire_rejected_id(rej_msg);
3018	if (!cm_id_priv)
3019		return -EINVAL;
3020
3021	cm_format_rej_event(work);
3022
3023	spin_lock_irq(&cm_id_priv->lock);
3024	switch (cm_id_priv->id.state) {
3025	case IB_CM_REQ_SENT:
3026	case IB_CM_MRA_REQ_RCVD:
3027	case IB_CM_REP_SENT:
3028	case IB_CM_MRA_REP_RCVD:
3029		ib_cancel_mad(cm_id_priv->msg);
3030		fallthrough;
3031	case IB_CM_REQ_RCVD:
3032	case IB_CM_MRA_REQ_SENT:
3033		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3034			cm_enter_timewait(cm_id_priv);
3035		else
3036			cm_reset_to_idle(cm_id_priv);
3037		break;
3038	case IB_CM_DREQ_SENT:
3039		ib_cancel_mad(cm_id_priv->msg);
3040		fallthrough;
3041	case IB_CM_REP_RCVD:
3042	case IB_CM_MRA_REP_SENT:
3043		cm_enter_timewait(cm_id_priv);
3044		break;
3045	case IB_CM_ESTABLISHED:
3046		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3047		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3048			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3049				ib_cancel_mad(cm_id_priv->msg);
3050			cm_enter_timewait(cm_id_priv);
3051			break;
3052		}
3053		fallthrough;
3054	default:
3055		trace_icm_rej_unknown_err(&cm_id_priv->id);
3056		spin_unlock_irq(&cm_id_priv->lock);
3057		goto out;
3058	}
3059
3060	cm_queue_work_unlock(cm_id_priv, work);
3061	return 0;
3062out:
3063	cm_deref_id(cm_id_priv);
3064	return -EINVAL;
3065}
3066
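/*
 * Send an MRA to stretch the peer's retry timeout while the local
 * consumer prepares its response to a REQ, REP or LAP.  If
 * IB_CM_MRA_FLAG_DELAY is set in service_timeout, only the state is
 * updated here and the MRA itself goes out later, when a duplicate of
 * the message arrives.
 */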
3067int ib_send_cm_mra(struct ib_cm_id *cm_id,
3068		   u8 service_timeout,
3069		   const void *private_data,
3070		   u8 private_data_len)
3071{
3072	struct cm_id_private *cm_id_priv;
3073	struct ib_mad_send_buf *msg;
3074	enum ib_cm_state cm_state;
3075	enum ib_cm_lap_state lap_state;
3076	enum cm_msg_response msg_response;
3077	void *data;
3078	unsigned long flags;
3079	int ret;
3080
3081	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3082		return -EINVAL;
3083
3084	data = cm_copy_private_data(private_data, private_data_len);
3085	if (IS_ERR(data))
3086		return PTR_ERR(data);
3087
3088	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3089
3090	spin_lock_irqsave(&cm_id_priv->lock, flags);
3091	switch (cm_id_priv->id.state) {
3092	case IB_CM_REQ_RCVD:
3093		cm_state = IB_CM_MRA_REQ_SENT;
3094		lap_state = cm_id->lap_state;
3095		msg_response = CM_MSG_RESPONSE_REQ;
3096		break;
3097	case IB_CM_REP_RCVD:
3098		cm_state = IB_CM_MRA_REP_SENT;
3099		lap_state = cm_id->lap_state;
3100		msg_response = CM_MSG_RESPONSE_REP;
3101		break;
3102	case IB_CM_ESTABLISHED:
3103		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3104			cm_state = cm_id->state;
3105			lap_state = IB_CM_MRA_LAP_SENT;
3106			msg_response = CM_MSG_RESPONSE_OTHER;
3107			break;
3108		}
3109		fallthrough;
3110	default:
3111		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
3112		ret = -EINVAL;
3113		goto error_unlock;
3114	}
3115
3116	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3117		msg = cm_alloc_msg(cm_id_priv);
3118		if (IS_ERR(msg)) {
3119			ret = PTR_ERR(msg);
3120			goto error_unlock;
3121		}
3122
3123		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3124			      msg_response, service_timeout,
3125			      private_data, private_data_len);
3126		trace_icm_send_mra(cm_id);
3127		ret = ib_post_send_mad(msg, NULL);
3128		if (ret)
3129			goto error_free_msg;
3130	}
3131
3132	cm_id->state = cm_state;
3133	cm_id->lap_state = lap_state;
3134	cm_id_priv->service_timeout = service_timeout;
3135	cm_set_private_data(cm_id_priv, data, private_data_len);
3136	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3137	return 0;
3138
3139error_free_msg:
3140	cm_free_msg(msg);
3141error_unlock:
3142	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3143	kfree(data);
3144	return ret;
3145}
3146EXPORT_SYMBOL(ib_send_cm_mra);
3147
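/*
 * A received MRA asks us to keep waiting: the timeout of the
 * outstanding REQ, REP or LAP is extended via ib_modify_mad() by the
 * peer's service timeout plus the ack timeout already stored in the
 * address vector, and the matching MRA-received state is recorded.
 */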
3148static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3149{
3150	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3151	case CM_MSG_RESPONSE_REQ:
3152		return cm_acquire_id(
3153			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3154			0);
3155	case CM_MSG_RESPONSE_REP:
3156	case CM_MSG_RESPONSE_OTHER:
3157		return cm_acquire_id(
3158			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3159			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3160	default:
3161		return NULL;
3162	}
3163}
3164
3165static int cm_mra_handler(struct cm_work *work)
3166{
3167	struct cm_id_private *cm_id_priv;
3168	struct cm_mra_msg *mra_msg;
3169	int timeout;
3170
3171	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3172	cm_id_priv = cm_acquire_mraed_id(mra_msg);
3173	if (!cm_id_priv)
3174		return -EINVAL;
3175
3176	work->cm_event.private_data =
3177		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3178	work->cm_event.param.mra_rcvd.service_timeout =
3179		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3180	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3181		  cm_convert_to_ms(cm_id_priv->av.timeout);
3182
3183	spin_lock_irq(&cm_id_priv->lock);
3184	switch (cm_id_priv->id.state) {
3185	case IB_CM_REQ_SENT:
3186		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3187			    CM_MSG_RESPONSE_REQ ||
3188		    ib_modify_mad(cm_id_priv->msg, timeout))
3189			goto out;
3190		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3191		break;
3192	case IB_CM_REP_SENT:
3193		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3194			    CM_MSG_RESPONSE_REP ||
3195		    ib_modify_mad(cm_id_priv->msg, timeout))
3196			goto out;
3197		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3198		break;
3199	case IB_CM_ESTABLISHED:
3200		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3201			    CM_MSG_RESPONSE_OTHER ||
3202		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3203		    ib_modify_mad(cm_id_priv->msg, timeout)) {
3204			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3205				atomic_long_inc(
3206					&work->port->counters[CM_RECV_DUPLICATES]
3207							     [CM_MRA_COUNTER]);
3208			goto out;
3209		}
3210		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3211		break;
3212	case IB_CM_MRA_REQ_RCVD:
3213	case IB_CM_MRA_REP_RCVD:
3214		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3215						     [CM_MRA_COUNTER]);
3216		fallthrough;
3217	default:
3218		trace_icm_mra_unknown_err(&cm_id_priv->id);
3219		goto out;
3220	}
3221
3222	cm_id_priv->msg->context[1] = (void *) (unsigned long)
3223				      cm_id_priv->id.state;
3224	cm_queue_work_unlock(cm_id_priv, work);
3225	return 0;
3226out:
3227	spin_unlock_irq(&cm_id_priv->lock);
3228	cm_deref_id(cm_id_priv);
3229	return -EINVAL;
3230}
3231
3232static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3233					struct sa_path_rec *path)
3234{
3235	u32 lid;
3236
3237	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3238		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3239					       lap_msg));
3240		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3241					       lap_msg));
3242	} else {
3243		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3244			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3245		sa_path_set_dlid(path, lid);
3246
3247		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3248			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3249		sa_path_set_slid(path, lid);
3250	}
3251}
3252
3253static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3254				    struct sa_path_rec *path,
3255				    struct cm_lap_msg *lap_msg)
3256{
3257	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3258	path->sgid =
3259		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3260	path->flow_label =
3261		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3262	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3263	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3264	path->reversible = 1;
3265	path->pkey = cm_id_priv->pkey;
3266	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3267	path->mtu_selector = IB_SA_EQ;
3268	path->mtu = cm_id_priv->path_mtu;
3269	path->rate_selector = IB_SA_EQ;
3270	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3271	path->packet_life_time_selector = IB_SA_EQ;
3272	path->packet_life_time =
3273		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3274	path->packet_life_time -= (path->packet_life_time > 0);
3275	cm_format_path_lid_from_lap(lap_msg, path);
3276}
3277
3278static int cm_lap_handler(struct cm_work *work)
3279{
3280	struct cm_id_private *cm_id_priv;
3281	struct cm_lap_msg *lap_msg;
3282	struct ib_cm_lap_event_param *param;
3283	struct ib_mad_send_buf *msg = NULL;
3284	struct rdma_ah_attr ah_attr;
3285	struct cm_av alt_av = {};
3286	int ret;
3287
3288	/* Alternate path messages are currently not supported for the
3289	 * RoCE link layer.
3290	 */
3291	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3292			       work->port->port_num))
3293		return -EINVAL;
3294
3295	/* todo: verify LAP request and send reject APR if invalid. */
3296	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3297	cm_id_priv = cm_acquire_id(
3298		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3299		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3300	if (!cm_id_priv)
3301		return -EINVAL;
3302
3303	param = &work->cm_event.param.lap_rcvd;
3304	memset(&work->path[0], 0, sizeof(work->path[0]));
3305	cm_path_set_rec_type(work->port->cm_dev->ib_device,
3306			     work->port->port_num, &work->path[0],
3307			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3308					     lap_msg));
3309	param->alternate_path = &work->path[0];
3310	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3311	work->cm_event.private_data =
3312		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3313
3314	ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
3315				      work->port->port_num,
3316				      work->mad_recv_wc->wc,
3317				      work->mad_recv_wc->recv_buf.grh,
3318				      &ah_attr);
3319	if (ret)
3320		goto deref;
3321
3322	ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
3323	if (ret) {
3324		rdma_destroy_ah_attr(&ah_attr);
3325		return -EINVAL;
3326	}
3327
3328	spin_lock_irq(&cm_id_priv->lock);
3329	cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3330			   &ah_attr, &cm_id_priv->av);
3331	cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
3332
3333	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3334		goto unlock;
3335
3336	switch (cm_id_priv->id.lap_state) {
3337	case IB_CM_LAP_UNINIT:
3338	case IB_CM_LAP_IDLE:
3339		break;
3340	case IB_CM_MRA_LAP_SENT:
3341		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3342						     [CM_LAP_COUNTER]);
3343		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3344		if (IS_ERR(msg))
3345			goto unlock;
3346
3347		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3348			      CM_MSG_RESPONSE_OTHER,
3349			      cm_id_priv->service_timeout,
3350			      cm_id_priv->private_data,
3351			      cm_id_priv->private_data_len);
3352		spin_unlock_irq(&cm_id_priv->lock);
3353
3354		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3355		    ib_post_send_mad(msg, NULL))
3356			cm_free_response_msg(msg);
3357		goto deref;
3358	case IB_CM_LAP_RCVD:
3359		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3360						     [CM_LAP_COUNTER]);
3361		goto unlock;
3362	default:
3363		goto unlock;
3364	}
3365
3366	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3367	cm_id_priv->tid = lap_msg->hdr.tid;
3368	cm_queue_work_unlock(cm_id_priv, work);
3369	return 0;
3370
3371unlock:	spin_unlock_irq(&cm_id_priv->lock);
3372deref:	cm_deref_id(cm_id_priv);
3373	return -EINVAL;
3374}
3375
3376static int cm_apr_handler(struct cm_work *work)
3377{
3378	struct cm_id_private *cm_id_priv;
3379	struct cm_apr_msg *apr_msg;
3380
3381	/* Alternate path messages are currently not supported for the
3382	 * RoCE link layer.
3383	 */
3384	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3385			       work->port->port_num))
3386		return -EINVAL;
3387
3388	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3389	cm_id_priv = cm_acquire_id(
3390		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3391		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3392	if (!cm_id_priv)
3393		return -EINVAL; /* Unmatched reply. */
3394
3395	work->cm_event.param.apr_rcvd.ap_status =
3396		IBA_GET(CM_APR_AR_STATUS, apr_msg);
3397	work->cm_event.param.apr_rcvd.apr_info =
3398		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3399	work->cm_event.param.apr_rcvd.info_len =
3400		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3401	work->cm_event.private_data =
3402		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3403
3404	spin_lock_irq(&cm_id_priv->lock);
3405	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3406	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3407	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3408		spin_unlock_irq(&cm_id_priv->lock);
3409		goto out;
3410	}
3411	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3412	ib_cancel_mad(cm_id_priv->msg);
3413	cm_queue_work_unlock(cm_id_priv, work);
3414	return 0;
3415out:
3416	cm_deref_id(cm_id_priv);
3417	return -EINVAL;
3418}
3419
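/*
 * Runs when the timewait period for a connection expires.  If the ID
 * is still in IB_CM_TIMEWAIT for the same remote QPN, it transitions
 * to IB_CM_IDLE and an IB_CM_TIMEWAIT_EXIT event is delivered.
 */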
3420static int cm_timewait_handler(struct cm_work *work)
3421{
3422	struct cm_timewait_info *timewait_info;
3423	struct cm_id_private *cm_id_priv;
3424
3425	timewait_info = container_of(work, struct cm_timewait_info, work);
3426	spin_lock_irq(&cm.lock);
3427	list_del(&timewait_info->list);
3428	spin_unlock_irq(&cm.lock);
3429
3430	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3431				   timewait_info->work.remote_id);
3432	if (!cm_id_priv)
3433		return -EINVAL;
3434
3435	spin_lock_irq(&cm_id_priv->lock);
3436	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3437	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3438		spin_unlock_irq(&cm_id_priv->lock);
3439		goto out;
3440	}
3441	cm_id_priv->id.state = IB_CM_IDLE;
3442	cm_queue_work_unlock(cm_id_priv, work);
3443	return 0;
3444out:
3445	cm_deref_id(cm_id_priv);
3446	return -EINVAL;
3447}
3448
3449static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3450			       struct cm_id_private *cm_id_priv,
3451			       struct ib_cm_sidr_req_param *param)
3452{
3453	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3454			  cm_form_tid(cm_id_priv));
3455	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3456		be32_to_cpu(cm_id_priv->id.local_id));
3457	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3458		be16_to_cpu(param->path->pkey));
3459	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3460		be64_to_cpu(param->service_id));
3461
3462	if (param->private_data && param->private_data_len)
3463		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3464			    param->private_data, param->private_data_len);
3465}
3466
3467int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3468			struct ib_cm_sidr_req_param *param)
3469{
3470	struct cm_id_private *cm_id_priv;
3471	struct ib_mad_send_buf *msg;
3472	struct cm_av av = {};
3473	unsigned long flags;
3474	int ret;
3475
3476	if (!param->path || (param->private_data &&
3477	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3478		return -EINVAL;
3479
3480	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3481	ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
3482	if (ret)
3483		return ret;
3484
3485	spin_lock_irqsave(&cm_id_priv->lock, flags);
3486	cm_move_av_from_path(&cm_id_priv->av, &av);
3487	cm_id->service_id = param->service_id;
3488	cm_id->service_mask = ~cpu_to_be64(0);
3489	cm_id_priv->timeout_ms = param->timeout_ms;
3490	cm_id_priv->max_cm_retries = param->max_cm_retries;
3491	if (cm_id->state != IB_CM_IDLE) {
3492		ret = -EINVAL;
3493		goto out_unlock;
3494	}
3495
3496	msg = cm_alloc_priv_msg(cm_id_priv);
3497	if (IS_ERR(msg)) {
3498		ret = PTR_ERR(msg);
3499		goto out_unlock;
3500	}
3501
3502	cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
3503			   param);
3504	msg->timeout_ms = cm_id_priv->timeout_ms;
3505	msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
3506
3507	trace_icm_send_sidr_req(&cm_id_priv->id);
3508	ret = ib_post_send_mad(msg, NULL);
3509	if (ret)
3510		goto out_free;
3511	cm_id->state = IB_CM_SIDR_REQ_SENT;
3512	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3513	return 0;
3514out_free:
3515	cm_free_priv_msg(msg);
3516out_unlock:
3517	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3518	return ret;
3519}
3520EXPORT_SYMBOL(ib_send_cm_sidr_req);
3521
3522static void cm_format_sidr_req_event(struct cm_work *work,
3523				     const struct cm_id_private *rx_cm_id,
3524				     struct ib_cm_id *listen_id)
3525{
3526	struct cm_sidr_req_msg *sidr_req_msg;
3527	struct ib_cm_sidr_req_event_param *param;
3528
3529	sidr_req_msg = (struct cm_sidr_req_msg *)
3530				work->mad_recv_wc->recv_buf.mad;
3531	param = &work->cm_event.param.sidr_req_rcvd;
3532	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3533	param->listen_id = listen_id;
3534	param->service_id =
3535		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3536	param->bth_pkey = cm_get_bth_pkey(work);
3537	param->port = work->port->port_num;
3538	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3539	work->cm_event.private_data =
3540		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3541}
3542
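/*
 * Handle a SIDR REQ, which resolves a service ID to a QPN without
 * creating a connection.  A new cm_id is created for the exchange,
 * duplicates are detected through the remote SIDR rbtree, and requests
 * for a service ID nobody listens on are answered with
 * IB_SIDR_UNSUPPORTED.
 */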
3543static int cm_sidr_req_handler(struct cm_work *work)
3544{
3545	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3546	struct cm_sidr_req_msg *sidr_req_msg;
3547	struct ib_wc *wc;
3548	int ret;
3549
3550	cm_id_priv =
3551		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3552	if (IS_ERR(cm_id_priv))
3553		return PTR_ERR(cm_id_priv);
3554
3555	/* Record SGID/SLID and request ID for lookup. */
3556	sidr_req_msg = (struct cm_sidr_req_msg *)
3557				work->mad_recv_wc->recv_buf.mad;
3558
3559	cm_id_priv->id.remote_id =
3560		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3561	cm_id_priv->id.service_id =
3562		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3563	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3564	cm_id_priv->tid = sidr_req_msg->hdr.tid;
3565
3566	wc = work->mad_recv_wc->wc;
3567	cm_id_priv->sidr_slid = wc->slid;
3568	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3569				      work->mad_recv_wc->recv_buf.grh,
3570				      &cm_id_priv->av);
3571	if (ret)
3572		goto out;
3573
3574	spin_lock_irq(&cm.lock);
3575	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3576	if (listen_cm_id_priv) {
3577		spin_unlock_irq(&cm.lock);
3578		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3579						     [CM_SIDR_REQ_COUNTER]);
3580		goto out; /* Duplicate message. */
3581	}
3582	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3583	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3584					   cm_id_priv->id.service_id);
3585	if (!listen_cm_id_priv) {
3586		spin_unlock_irq(&cm.lock);
3587		ib_send_cm_sidr_rep(&cm_id_priv->id,
3588				    &(struct ib_cm_sidr_rep_param){
3589					    .status = IB_SIDR_UNSUPPORTED });
3590		goto out; /* No match. */
3591	}
3592	spin_unlock_irq(&cm.lock);
3593
3594	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3595	cm_id_priv->id.context = listen_cm_id_priv->id.context;
3596
3597	/*
3598	 * A SIDR ID does not need to be in the xarray since it does not receive
3599	 * MADs, is not placed in the remote_id or remote_qpn rbtree, and does
3600	 * not enter timewait.
3601	 */
3602
3603	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3604	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3605	cm_free_work(work);
3606	/*
3607	 * A pointer to the listen_cm_id is held in the event, so this deref
3608	 * must be after the event is delivered above.
3609	 */
3610	cm_deref_id(listen_cm_id_priv);
3611	if (ret)
3612		cm_destroy_id(&cm_id_priv->id, ret);
3613	return 0;
3614out:
3615	ib_destroy_cm_id(&cm_id_priv->id);
3616	return -EINVAL;
3617}
3618
3619static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3620			       struct cm_id_private *cm_id_priv,
3621			       struct ib_cm_sidr_rep_param *param)
3622{
3623	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3624			      cm_id_priv->tid, param->ece.attr_mod);
3625	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3626		be32_to_cpu(cm_id_priv->id.remote_id));
3627	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3628	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3629	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3630		be64_to_cpu(cm_id_priv->id.service_id));
3631	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3632	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3633		param->ece.vendor_id & 0xFF);
3634	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3635		(param->ece.vendor_id >> 8) & 0xFF);
3636
3637	if (param->info && param->info_length)
3638		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3639			    param->info, param->info_length);
3640
3641	if (param->private_data && param->private_data_len)
3642		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3643			    param->private_data, param->private_data_len);
3644}
3645
3646static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3647				   struct ib_cm_sidr_rep_param *param)
3648{
3649	struct ib_mad_send_buf *msg;
3650	unsigned long flags;
3651	int ret;
3652
3653	lockdep_assert_held(&cm_id_priv->lock);
3654
3655	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3656	    (param->private_data &&
3657	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3658		return -EINVAL;
3659
3660	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3661		return -EINVAL;
3662
3663	msg = cm_alloc_msg(cm_id_priv);
3664	if (IS_ERR(msg))
3665		return PTR_ERR(msg);
3666
3667	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3668			   param);
3669	trace_icm_send_sidr_rep(&cm_id_priv->id);
3670	ret = ib_post_send_mad(msg, NULL);
3671	if (ret) {
3672		cm_free_msg(msg);
3673		return ret;
3674	}
3675	cm_id_priv->id.state = IB_CM_IDLE;
3676	spin_lock_irqsave(&cm.lock, flags);
3677	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3678		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3679		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3680	}
3681	spin_unlock_irqrestore(&cm.lock, flags);
3682	return 0;
3683}
3684
3685int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3686			struct ib_cm_sidr_rep_param *param)
3687{
3688	struct cm_id_private *cm_id_priv =
3689		container_of(cm_id, struct cm_id_private, id);
3690	unsigned long flags;
3691	int ret;
3692
3693	spin_lock_irqsave(&cm_id_priv->lock, flags);
3694	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3695	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3696	return ret;
3697}
3698EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3699
3700static void cm_format_sidr_rep_event(struct cm_work *work,
3701				     const struct cm_id_private *cm_id_priv)
3702{
3703	struct cm_sidr_rep_msg *sidr_rep_msg;
3704	struct ib_cm_sidr_rep_event_param *param;
3705
3706	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3707				work->mad_recv_wc->recv_buf.mad;
3708	param = &work->cm_event.param.sidr_rep_rcvd;
3709	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3710	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3711	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3712	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3713				      sidr_rep_msg);
3714	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3715				  sidr_rep_msg);
3716	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3717	work->cm_event.private_data =
3718		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3719}
3720
3721static int cm_sidr_rep_handler(struct cm_work *work)
3722{
3723	struct cm_sidr_rep_msg *sidr_rep_msg;
3724	struct cm_id_private *cm_id_priv;
3725
3726	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3727				work->mad_recv_wc->recv_buf.mad;
3728	cm_id_priv = cm_acquire_id(
3729		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3730	if (!cm_id_priv)
3731		return -EINVAL; /* Unmatched reply. */
3732
3733	spin_lock_irq(&cm_id_priv->lock);
3734	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3735		spin_unlock_irq(&cm_id_priv->lock);
3736		goto out;
3737	}
3738	cm_id_priv->id.state = IB_CM_IDLE;
3739	ib_cancel_mad(cm_id_priv->msg);
3740	spin_unlock_irq(&cm_id_priv->lock);
3741
3742	cm_format_sidr_rep_event(work, cm_id_priv);
3743	cm_process_work(cm_id_priv, work);
3744	return 0;
3745out:
3746	cm_deref_id(cm_id_priv);
3747	return -EINVAL;
3748}
3749
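/*
 * Send-completion error path for a CM MAD that failed to go out
 * (typically after exhausting its retries).  If the ID is still in the
 * state the send was issued from, the failure is surfaced to the
 * consumer as the matching IB_CM_*_ERROR event; flushed or stale
 * completions are quietly discarded.
 */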
3750static void cm_process_send_error(struct cm_id_private *cm_id_priv,
3751				  struct ib_mad_send_buf *msg,
3752				  enum ib_cm_state state,
3753				  enum ib_wc_status wc_status)
3754{
3755	struct ib_cm_event cm_event = {};
3756	int ret;
3757
3758	/* Discard old sends or ones without a response. */
3759	spin_lock_irq(&cm_id_priv->lock);
3760	if (msg != cm_id_priv->msg) {
3761		spin_unlock_irq(&cm_id_priv->lock);
3762		cm_free_msg(msg);
3763		return;
3764	}
3765	cm_free_priv_msg(msg);
3766
3767	if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
3768	    wc_status == IB_WC_WR_FLUSH_ERR)
3769		goto out_unlock;
3770
3771	trace_icm_mad_send_err(state, wc_status);
3772	switch (state) {
3773	case IB_CM_REQ_SENT:
3774	case IB_CM_MRA_REQ_RCVD:
3775		cm_reset_to_idle(cm_id_priv);
3776		cm_event.event = IB_CM_REQ_ERROR;
3777		break;
3778	case IB_CM_REP_SENT:
3779	case IB_CM_MRA_REP_RCVD:
3780		cm_reset_to_idle(cm_id_priv);
3781		cm_event.event = IB_CM_REP_ERROR;
3782		break;
3783	case IB_CM_DREQ_SENT:
3784		cm_enter_timewait(cm_id_priv);
3785		cm_event.event = IB_CM_DREQ_ERROR;
3786		break;
3787	case IB_CM_SIDR_REQ_SENT:
3788		cm_id_priv->id.state = IB_CM_IDLE;
3789		cm_event.event = IB_CM_SIDR_REQ_ERROR;
3790		break;
3791	default:
3792		goto out_unlock;
3793	}
3794	spin_unlock_irq(&cm_id_priv->lock);
3795	cm_event.param.send_status = wc_status;
3796
3797	/* No other events can occur on the cm_id at this point. */
3798	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3799	if (ret)
3800		ib_destroy_cm_id(&cm_id_priv->id);
3801	return;
3802out_unlock:
3803	spin_unlock_irq(&cm_id_priv->lock);
3804}
3805
3806static void cm_send_handler(struct ib_mad_agent *mad_agent,
3807			    struct ib_mad_send_wc *mad_send_wc)
3808{
3809	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3810	struct cm_id_private *cm_id_priv = msg->context[0];
3811	enum ib_cm_state state =
3812		(enum ib_cm_state)(unsigned long)msg->context[1];
3813	struct cm_port *port;
3814	u16 attr_index;
3815
3816	port = mad_agent->context;
3817	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3818				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3819
3820	/*
3821	 * If the send was in response to a received message (context[0] is not
3822	 * set to a cm_id), and is not a REJ, then it is a send that was
3823	 * manually retried.
3824	 */
3825	if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
3826		msg->retries = 1;
3827
3828	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
3829	if (msg->retries)
3830		atomic_long_add(msg->retries,
3831				&port->counters[CM_XMIT_RETRIES][attr_index]);
3832
3833	if (cm_id_priv)
3834		cm_process_send_error(cm_id_priv, msg, state,
3835				      mad_send_wc->status);
3836	else
3837		cm_free_response_msg(msg);
3838}
3839
3840static void cm_work_handler(struct work_struct *_work)
3841{
3842	struct cm_work *work = container_of(_work, struct cm_work, work.work);
3843	int ret;
3844
3845	switch (work->cm_event.event) {
3846	case IB_CM_REQ_RECEIVED:
3847		ret = cm_req_handler(work);
3848		break;
3849	case IB_CM_MRA_RECEIVED:
3850		ret = cm_mra_handler(work);
3851		break;
3852	case IB_CM_REJ_RECEIVED:
3853		ret = cm_rej_handler(work);
3854		break;
3855	case IB_CM_REP_RECEIVED:
3856		ret = cm_rep_handler(work);
3857		break;
3858	case IB_CM_RTU_RECEIVED:
3859		ret = cm_rtu_handler(work);
3860		break;
3861	case IB_CM_USER_ESTABLISHED:
3862		ret = cm_establish_handler(work);
3863		break;
3864	case IB_CM_DREQ_RECEIVED:
3865		ret = cm_dreq_handler(work);
3866		break;
3867	case IB_CM_DREP_RECEIVED:
3868		ret = cm_drep_handler(work);
3869		break;
3870	case IB_CM_SIDR_REQ_RECEIVED:
3871		ret = cm_sidr_req_handler(work);
3872		break;
3873	case IB_CM_SIDR_REP_RECEIVED:
3874		ret = cm_sidr_rep_handler(work);
3875		break;
3876	case IB_CM_LAP_RECEIVED:
3877		ret = cm_lap_handler(work);
3878		break;
3879	case IB_CM_APR_RECEIVED:
3880		ret = cm_apr_handler(work);
3881		break;
3882	case IB_CM_TIMEWAIT_EXIT:
3883		ret = cm_timewait_handler(work);
3884		break;
3885	default:
3886		trace_icm_handler_err(work->cm_event.event);
3887		ret = -EINVAL;
3888		break;
3889	}
3890	if (ret)
3891		cm_free_work(work);
3892}
3893
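/*
 * Called via ib_cm_notify(IB_EVENT_COMM_EST) when the QP sees incoming
 * data before the RTU arrives.  The ID is moved to IB_CM_ESTABLISHED
 * immediately and a work item delivers IB_CM_USER_ESTABLISHED to the
 * consumer.
 */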
3894static int cm_establish(struct ib_cm_id *cm_id)
3895{
3896	struct cm_id_private *cm_id_priv;
3897	struct cm_work *work;
3898	unsigned long flags;
3899	int ret = 0;
3900	struct cm_device *cm_dev;
3901
3902	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3903	if (!cm_dev)
3904		return -ENODEV;
3905
3906	work = kmalloc(sizeof *work, GFP_ATOMIC);
3907	if (!work)
3908		return -ENOMEM;
3909
3910	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3911	spin_lock_irqsave(&cm_id_priv->lock, flags);
3912	switch (cm_id->state) {
3913	case IB_CM_REP_SENT:
3914	case IB_CM_MRA_REP_RCVD:
3915		cm_id->state = IB_CM_ESTABLISHED;
3916		break;
3917	case IB_CM_ESTABLISHED:
3918		ret = -EISCONN;
3919		break;
3920	default:
3921		trace_icm_establish_err(cm_id);
3922		ret = -EINVAL;
3923		break;
3924	}
3925	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3926
3927	if (ret) {
3928		kfree(work);
3929		goto out;
3930	}
3931
3932	/*
3933	 * The CM worker thread may try to destroy the cm_id before it
3934	 * can execute this work item.  To prevent potential deadlock,
3935	 * we need to find the cm_id once we're in the context of the
3936	 * worker thread, rather than holding a reference on it.
3937	 */
3938	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3939	work->local_id = cm_id->local_id;
3940	work->remote_id = cm_id->remote_id;
3941	work->mad_recv_wc = NULL;
3942	work->cm_event.event = IB_CM_USER_ESTABLISHED;
3943
3944	/* Check if the device started its remove_one */
3945	spin_lock_irqsave(&cm.lock, flags);
3946	if (!cm_dev->going_down) {
3947		queue_delayed_work(cm.wq, &work->work, 0);
3948	} else {
3949		kfree(work);
3950		ret = -ENODEV;
3951	}
3952	spin_unlock_irqrestore(&cm.lock, flags);
3953
3954out:
3955	return ret;
3956}
3957
3958static int cm_migrate(struct ib_cm_id *cm_id)
3959{
3960	struct cm_id_private *cm_id_priv;
3961	unsigned long flags;
3962	int ret = 0;
3963
3964	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3965	spin_lock_irqsave(&cm_id_priv->lock, flags);
3966	if (cm_id->state == IB_CM_ESTABLISHED &&
3967	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3968	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
3969		cm_id->lap_state = IB_CM_LAP_IDLE;
3970		cm_id_priv->av = cm_id_priv->alt_av;
3971	} else
3972		ret = -EINVAL;
3973	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3974
3975	return ret;
3976}
3977
3978int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3979{
3980	int ret;
3981
3982	switch (event) {
3983	case IB_EVENT_COMM_EST:
3984		ret = cm_establish(cm_id);
3985		break;
3986	case IB_EVENT_PATH_MIG:
3987		ret = cm_migrate(cm_id);
3988		break;
3989	default:
3990		ret = -EINVAL;
3991	}
3992	return ret;
3993}
3994EXPORT_SYMBOL(ib_cm_notify);
3995
3996static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3997			    struct ib_mad_send_buf *send_buf,
3998			    struct ib_mad_recv_wc *mad_recv_wc)
3999{
4000	struct cm_port *port = mad_agent->context;
4001	struct cm_work *work;
4002	enum ib_cm_event_type event;
4003	bool alt_path = false;
4004	u16 attr_id;
4005	int paths = 0;
4006	int going_down = 0;
4007
4008	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4009	case CM_REQ_ATTR_ID:
4010		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4011						mad_recv_wc->recv_buf.mad);
4012		paths = 1 + (alt_path != 0);
4013		event = IB_CM_REQ_RECEIVED;
4014		break;
4015	case CM_MRA_ATTR_ID:
4016		event = IB_CM_MRA_RECEIVED;
4017		break;
4018	case CM_REJ_ATTR_ID:
4019		event = IB_CM_REJ_RECEIVED;
4020		break;
4021	case CM_REP_ATTR_ID:
4022		event = IB_CM_REP_RECEIVED;
4023		break;
4024	case CM_RTU_ATTR_ID:
4025		event = IB_CM_RTU_RECEIVED;
4026		break;
4027	case CM_DREQ_ATTR_ID:
4028		event = IB_CM_DREQ_RECEIVED;
4029		break;
4030	case CM_DREP_ATTR_ID:
4031		event = IB_CM_DREP_RECEIVED;
4032		break;
4033	case CM_SIDR_REQ_ATTR_ID:
4034		event = IB_CM_SIDR_REQ_RECEIVED;
4035		break;
4036	case CM_SIDR_REP_ATTR_ID:
4037		event = IB_CM_SIDR_REP_RECEIVED;
4038		break;
4039	case CM_LAP_ATTR_ID:
4040		paths = 1;
4041		event = IB_CM_LAP_RECEIVED;
4042		break;
4043	case CM_APR_ATTR_ID:
4044		event = IB_CM_APR_RECEIVED;
4045		break;
4046	default:
4047		ib_free_recv_mad(mad_recv_wc);
4048		return;
4049	}
4050
4051	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4052	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
4053
4054	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4055	if (!work) {
4056		ib_free_recv_mad(mad_recv_wc);
4057		return;
4058	}
4059
4060	INIT_DELAYED_WORK(&work->work, cm_work_handler);
4061	work->cm_event.event = event;
4062	work->mad_recv_wc = mad_recv_wc;
4063	work->port = port;
4064
4065	/* Check whether the device has started its remove_one */
4066	spin_lock_irq(&cm.lock);
4067	if (!port->cm_dev->going_down)
4068		queue_delayed_work(cm.wq, &work->work, 0);
4069	else
4070		going_down = 1;
4071	spin_unlock_irq(&cm.lock);
4072
4073	if (going_down) {
4074		kfree(work);
4075		ib_free_recv_mad(mad_recv_wc);
4076	}
4077}
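
/*
 * A note on the allocation in cm_recv_handler(): struct cm_work ends
 * in a flexible array of path records, so struct_size(work, path,
 * paths) computes sizeof(*work) + paths * sizeof(work->path[0]) with
 * overflow checking (see <linux/overflow.h>).  A minimal sketch of the
 * same pattern, using a hypothetical structure:
 *
 *	struct demo {
 *		int nr;
 *		struct sa_path_rec path[];	// flexible array member
 *	};
 *
 *	struct demo *d = kzalloc(struct_size(d, path, 2), GFP_KERNEL);
 */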
4078
4079static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4080				struct ib_qp_attr *qp_attr,
4081				int *qp_attr_mask)
4082{
4083	unsigned long flags;
4084	int ret;
4085
4086	spin_lock_irqsave(&cm_id_priv->lock, flags);
4087	switch (cm_id_priv->id.state) {
4088	case IB_CM_REQ_SENT:
4089	case IB_CM_MRA_REQ_RCVD:
4090	case IB_CM_REQ_RCVD:
4091	case IB_CM_MRA_REQ_SENT:
4092	case IB_CM_REP_RCVD:
4093	case IB_CM_MRA_REP_SENT:
4094	case IB_CM_REP_SENT:
4095	case IB_CM_MRA_REP_RCVD:
4096	case IB_CM_ESTABLISHED:
4097		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4098				IB_QP_PKEY_INDEX | IB_QP_PORT;
4099		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4100		if (cm_id_priv->responder_resources)
4101			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4102						    IB_ACCESS_REMOTE_ATOMIC;
4103		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4104		if (cm_id_priv->av.port)
4105			qp_attr->port_num = cm_id_priv->av.port->port_num;
4106		ret = 0;
4107		break;
4108	default:
4109		trace_icm_qp_init_err(&cm_id_priv->id);
4110		ret = -EINVAL;
4111		break;
4112	}
4113	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4114	return ret;
4115}
4116
4117static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4118			       struct ib_qp_attr *qp_attr,
4119			       int *qp_attr_mask)
4120{
4121	unsigned long flags;
4122	int ret;
4123
4124	spin_lock_irqsave(&cm_id_priv->lock, flags);
4125	switch (cm_id_priv->id.state) {
4126	case IB_CM_REQ_RCVD:
4127	case IB_CM_MRA_REQ_SENT:
4128	case IB_CM_REP_RCVD:
4129	case IB_CM_MRA_REP_SENT:
4130	case IB_CM_REP_SENT:
4131	case IB_CM_MRA_REP_RCVD:
4132	case IB_CM_ESTABLISHED:
4133		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4134				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4135		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4136		qp_attr->path_mtu = cm_id_priv->path_mtu;
4137		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4138		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4139		if (cm_id_priv->qp_type == IB_QPT_RC ||
4140		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4141			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4142					 IB_QP_MIN_RNR_TIMER;
4143			qp_attr->max_dest_rd_atomic =
4144					cm_id_priv->responder_resources;
4145			qp_attr->min_rnr_timer = 0;
4146		}
4147		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
4148		    cm_id_priv->alt_av.port) {
4149			*qp_attr_mask |= IB_QP_ALT_PATH;
4150			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4151			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4152			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4153			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4154		}
4155		ret = 0;
4156		break;
4157	default:
4158		trace_icm_qp_rtr_err(&cm_id_priv->id);
4159		ret = -EINVAL;
4160		break;
4161	}
4162	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4163	return ret;
4164}
4165
4166static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4167			       struct ib_qp_attr *qp_attr,
4168			       int *qp_attr_mask)
4169{
4170	unsigned long flags;
4171	int ret;
4172
4173	spin_lock_irqsave(&cm_id_priv->lock, flags);
4174	switch (cm_id_priv->id.state) {
4175	/* Allow transition to RTS before sending REP */
4176	case IB_CM_REQ_RCVD:
4177	case IB_CM_MRA_REQ_SENT:
4178
4179	case IB_CM_REP_RCVD:
4180	case IB_CM_MRA_REP_SENT:
4181	case IB_CM_REP_SENT:
4182	case IB_CM_MRA_REP_RCVD:
4183	case IB_CM_ESTABLISHED:
4184		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4185			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4186			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4187			switch (cm_id_priv->qp_type) {
4188			case IB_QPT_RC:
4189			case IB_QPT_XRC_INI:
4190				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4191						 IB_QP_MAX_QP_RD_ATOMIC;
4192				qp_attr->retry_cnt = cm_id_priv->retry_count;
4193				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4194				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4195				fallthrough;
4196			case IB_QPT_XRC_TGT:
4197				*qp_attr_mask |= IB_QP_TIMEOUT;
4198				qp_attr->timeout = cm_id_priv->av.timeout;
4199				break;
4200			default:
4201				break;
4202			}
4203			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4204				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4205				qp_attr->path_mig_state = IB_MIG_REARM;
4206			}
4207		} else {
4208			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4209			if (cm_id_priv->alt_av.port)
4210				qp_attr->alt_port_num =
4211					cm_id_priv->alt_av.port->port_num;
4212			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4213			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4214			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4215			qp_attr->path_mig_state = IB_MIG_REARM;
4216		}
4217		ret = 0;
4218		break;
4219	default:
4220		trace_icm_qp_rts_err(&cm_id_priv->id);
4221		ret = -EINVAL;
4222		break;
4223	}
4224	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4225	return ret;
4226}
4227
4228int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4229		       struct ib_qp_attr *qp_attr,
4230		       int *qp_attr_mask)
4231{
4232	struct cm_id_private *cm_id_priv;
4233	int ret;
4234
4235	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4236	switch (qp_attr->qp_state) {
4237	case IB_QPS_INIT:
4238		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4239		break;
4240	case IB_QPS_RTR:
4241		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4242		break;
4243	case IB_QPS_RTS:
4244		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4245		break;
4246	default:
4247		ret = -EINVAL;
4248		break;
4249	}
4250	return ret;
4251}
4252EXPORT_SYMBOL(ib_cm_init_qp_attr);
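
/*
 * Example (not part of this file): ULPs drive their QP through
 * INIT -> RTR -> RTS by calling ib_cm_init_qp_attr() once per target
 * state and handing the result to ib_modify_qp().  A minimal sketch,
 * with error handling reduced to the return codes:
 *
 *	static int my_set_qp_state(struct ib_cm_id *cm_id, struct ib_qp *qp,
 *				   enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr attr = { .qp_state = state };
 *		int mask, ret;
 *
 *		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &attr, mask);
 *	}
 *
 * called in turn with IB_QPS_INIT, IB_QPS_RTR and IB_QPS_RTS at the
 * appropriate points in the connection exchange.
 */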
4253
4254static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
4255			       struct ib_port_attribute *attr, char *buf)
4256{
4257	struct cm_counter_attribute *cm_attr =
4258		container_of(attr, struct cm_counter_attribute, attr);
4259	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);
4260
4261	if (WARN_ON(!cm_dev))
4262		return -EINVAL;
4263
4264	return sysfs_emit(
4265		buf, "%ld\n",
4266		atomic_long_read(
4267			&cm_dev->port[port_num - 1]
4268				 ->counters[cm_attr->group][cm_attr->index]));
4269}
4270
4271#define CM_COUNTER_ATTR(_name, _group, _index)                                 \
4272	{                                                                      \
4273		.attr = __ATTR(_name, 0444, cm_show_counter, NULL),            \
4274		.group = _group, .index = _index                               \
4275	}
4276
4277#define CM_COUNTER_GROUP(_group, _name)                                        \
4278	static struct cm_counter_attribute cm_counter_attr_##_group[] = {      \
4279		CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER),                  \
4280		CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER),                  \
4281		CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER),                  \
4282		CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER),                  \
4283		CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER),                  \
4284		CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER),                \
4285		CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER),                \
4286		CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER),        \
4287		CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER),        \
4288		CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER),                  \
4289		CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER),                  \
4290	};                                                                     \
4291	static struct attribute *cm_counter_attrs_##_group[] = {               \
4292		&cm_counter_attr_##_group[0].attr.attr,                        \
4293		&cm_counter_attr_##_group[1].attr.attr,                        \
4294		&cm_counter_attr_##_group[2].attr.attr,                        \
4295		&cm_counter_attr_##_group[3].attr.attr,                        \
4296		&cm_counter_attr_##_group[4].attr.attr,                        \
4297		&cm_counter_attr_##_group[5].attr.attr,                        \
4298		&cm_counter_attr_##_group[6].attr.attr,                        \
4299		&cm_counter_attr_##_group[7].attr.attr,                        \
4300		&cm_counter_attr_##_group[8].attr.attr,                        \
4301		&cm_counter_attr_##_group[9].attr.attr,                        \
4302		&cm_counter_attr_##_group[10].attr.attr,                       \
4303		NULL,                                                          \
4304	};                                                                     \
4305	static const struct attribute_group cm_counter_group_##_group = {      \
4306		.name = _name,                                                 \
4307		.attrs = cm_counter_attrs_##_group,                            \
4308	};
4309
4310CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
4311CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
4312CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
4313CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
4314
4315static const struct attribute_group *cm_counter_groups[] = {
4316	&cm_counter_group_CM_XMIT,
4317	&cm_counter_group_CM_XMIT_RETRIES,
4318	&cm_counter_group_CM_RECV,
4319	&cm_counter_group_CM_RECV_DUPLICATES,
4320	NULL,
4321};
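
/*
 * The attribute groups above surface the per-port counters under the
 * port's sysfs directory once cm_add_one() below registers them via
 * ib_port_register_client_groups(), e.g.:
 *
 *	/sys/class/infiniband/<device>/ports/<port>/cm_tx_msgs/req
 *	/sys/class/infiniband/<device>/ports/<port>/cm_rx_duplicates/rep
 *
 * (path layout assumed from the standard IB port sysfs hierarchy).
 */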
4322
4323static int cm_add_one(struct ib_device *ib_device)
4324{
4325	struct cm_device *cm_dev;
4326	struct cm_port *port;
4327	struct ib_mad_reg_req reg_req = {
4328		.mgmt_class = IB_MGMT_CLASS_CM,
4329		.mgmt_class_version = IB_CM_CLASS_VERSION,
4330	};
4331	struct ib_port_modify port_modify = {
4332		.set_port_cap_mask = IB_PORT_CM_SUP
4333	};
4334	unsigned long flags;
4335	int ret;
4336	int count = 0;
4337	u32 i;
4338
4339	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4340			 GFP_KERNEL);
4341	if (!cm_dev)
4342		return -ENOMEM;
4343
4344	kref_init(&cm_dev->kref);
4345	spin_lock_init(&cm_dev->mad_agent_lock);
4346	cm_dev->ib_device = ib_device;
4347	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4348	cm_dev->going_down = 0;
4349
4350	ib_set_client_data(ib_device, &cm_client, cm_dev);
4351
4352	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4353	rdma_for_each_port (ib_device, i) {
4354		if (!rdma_cap_ib_cm(ib_device, i))
4355			continue;
4356
4357		port = kzalloc(sizeof *port, GFP_KERNEL);
4358		if (!port) {
4359			ret = -ENOMEM;
4360			goto error1;
4361		}
4362
4363		cm_dev->port[i-1] = port;
4364		port->cm_dev = cm_dev;
4365		port->port_num = i;
4366
4367		ret = ib_port_register_client_groups(ib_device, i,
4368						     cm_counter_groups);
4369		if (ret)
4370			goto error1;
4371
4372		port->mad_agent = ib_register_mad_agent(ib_device, i,
4373							IB_QPT_GSI,
4374							&reg_req,
4375							0,
4376							cm_send_handler,
4377							cm_recv_handler,
4378							port,
4379							0);
4380		if (IS_ERR(port->mad_agent)) {
4381			ret = PTR_ERR(port->mad_agent);
4382			goto error2;
4383		}
4384
4385		ret = ib_modify_port(ib_device, i, 0, &port_modify);
4386		if (ret)
4387			goto error3;
4388
4389		count++;
4390	}
4391
4392	if (!count) {
4393		ret = -EOPNOTSUPP;
4394		goto free;
4395	}
4396
4397	write_lock_irqsave(&cm.device_lock, flags);
4398	list_add_tail(&cm_dev->list, &cm.device_list);
4399	write_unlock_irqrestore(&cm.device_lock, flags);
4400	return 0;
4401
4402error3:
4403	ib_unregister_mad_agent(port->mad_agent);
4404error2:
4405	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
4406error1:
4407	port_modify.set_port_cap_mask = 0;
4408	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4409	while (--i) {
4410		if (!rdma_cap_ib_cm(ib_device, i))
4411			continue;
4412
4413		port = cm_dev->port[i-1];
4414		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4415		ib_unregister_mad_agent(port->mad_agent);
4416		ib_port_unregister_client_groups(ib_device, i,
4417						 cm_counter_groups);
4418	}
4419free:
4420	cm_device_put(cm_dev);
4421	return ret;
4422}
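
/*
 * cm_add_one() and cm_remove_one() are hooked into device hot-plug via
 * the ib_client interface; the cm_client definition appears earlier in
 * this file and follows the usual pattern, roughly:
 *
 *	static struct ib_client cm_client = {
 *		.name   = "cm",
 *		.add    = cm_add_one,
 *		.remove = cm_remove_one,
 *	};
 *
 * ib_register_client(&cm_client) in ib_cm_init() then invokes .add for
 * each existing IB device and for any device registered later.
 */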
4423
4424static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4425{
4426	struct cm_device *cm_dev = client_data;
4427	struct cm_port *port;
4428	struct ib_port_modify port_modify = {
4429		.clr_port_cap_mask = IB_PORT_CM_SUP
4430	};
4431	unsigned long flags;
4432	u32 i;
4433
4434	write_lock_irqsave(&cm.device_lock, flags);
4435	list_del(&cm_dev->list);
4436	write_unlock_irqrestore(&cm.device_lock, flags);
4437
4438	spin_lock_irq(&cm.lock);
4439	cm_dev->going_down = 1;
4440	spin_unlock_irq(&cm.lock);
4441
4442	rdma_for_each_port (ib_device, i) {
4443		struct ib_mad_agent *mad_agent;
4444
4445		if (!rdma_cap_ib_cm(ib_device, i))
4446			continue;
4447
4448		port = cm_dev->port[i-1];
4449		mad_agent = port->mad_agent;
4450		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4451		/*
4452		 * Flush the workqueue after going_down has been set: this
4453		 * guarantees that no new work can be queued by the receive
4454		 * handler, after which it is safe to unregister the MAD agent.
4455		 */
4456		flush_workqueue(cm.wq);
4457		/*
4458		 * The flush above ensures that no call paths from queued work
4459		 * are still running; all remaining paths take mad_agent_lock.
4460		 */
4461		spin_lock(&cm_dev->mad_agent_lock);
4462		port->mad_agent = NULL;
4463		spin_unlock(&cm_dev->mad_agent_lock);
4464		ib_unregister_mad_agent(mad_agent);
4465		ib_port_unregister_client_groups(ib_device, i,
4466						 cm_counter_groups);
4467	}
4468
4469	cm_device_put(cm_dev);
4470}
4471
4472static int __init ib_cm_init(void)
4473{
4474	int ret;
4475
4476	INIT_LIST_HEAD(&cm.device_list);
4477	rwlock_init(&cm.device_lock);
4478	spin_lock_init(&cm.lock);
4479	cm.listen_service_table = RB_ROOT;
4480	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4481	cm.remote_id_table = RB_ROOT;
4482	cm.remote_qp_table = RB_ROOT;
4483	cm.remote_sidr_table = RB_ROOT;
4484	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
4485	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4486	INIT_LIST_HEAD(&cm.timewait_list);
4487
4488	cm.wq = alloc_workqueue("ib_cm", 0, 1);
4489	if (!cm.wq) {
4490		ret = -ENOMEM;
4491		goto error2;
4492	}
4493
4494	ret = ib_register_client(&cm_client);
4495	if (ret)
4496		goto error3;
4497
4498	return 0;
4499error3:
4500	destroy_workqueue(cm.wq);
4501error2:
4502	return ret;
4503}
4504
4505static void __exit ib_cm_cleanup(void)
4506{
4507	struct cm_timewait_info *timewait_info, *tmp;
4508
4509	spin_lock_irq(&cm.lock);
4510	list_for_each_entry(timewait_info, &cm.timewait_list, list)
4511		cancel_delayed_work(&timewait_info->work.work);
4512	spin_unlock_irq(&cm.lock);
4513
4514	ib_unregister_client(&cm_client);
4515	destroy_workqueue(cm.wq);
4516
4517	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4518		list_del(&timewait_info->list);
4519		kfree(timewait_info);
4520	}
4521
4522	WARN_ON(!xa_empty(&cm.local_id_table));
4523}
4524
4525module_init(ib_cm_init);
4526module_exit(ib_cm_cleanup);