   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
   4 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
   5 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
   6 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
   7 */
   8
   9#include <linux/completion.h>
  10#include <linux/in.h>
  11#include <linux/in6.h>
  12#include <linux/mutex.h>
  13#include <linux/random.h>
  14#include <linux/rbtree.h>
  15#include <linux/igmp.h>
  16#include <linux/xarray.h>
  17#include <linux/inetdevice.h>
  18#include <linux/slab.h>
  19#include <linux/module.h>
  20#include <net/route.h>
  21
  22#include <net/net_namespace.h>
  23#include <net/netns/generic.h>
  24#include <net/netevent.h>
  25#include <net/tcp.h>
  26#include <net/ipv6.h>
  27#include <net/ip_fib.h>
  28#include <net/ip6_route.h>
  29
  30#include <rdma/rdma_cm.h>
  31#include <rdma/rdma_cm_ib.h>
  32#include <rdma/rdma_netlink.h>
  33#include <rdma/ib.h>
  34#include <rdma/ib_cache.h>
  35#include <rdma/ib_cm.h>
  36#include <rdma/ib_sa.h>
  37#include <rdma/iw_cm.h>
  38
  39#include "core_priv.h"
  40#include "cma_priv.h"
  41#include "cma_trace.h"
  42
  43MODULE_AUTHOR("Sean Hefty");
  44MODULE_DESCRIPTION("Generic RDMA CM Agent");
  45MODULE_LICENSE("Dual BSD/GPL");
  46
  47#define CMA_CM_RESPONSE_TIMEOUT 20
  48#define CMA_MAX_CM_RETRIES 15
  49#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
  50#define CMA_IBOE_PACKET_LIFETIME 16
  51#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
  52
  53static const char * const cma_events[] = {
  54	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
  55	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
   56	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
  57	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
  58	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
  59	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
  60	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
  61	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
  62	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
  63	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
  64	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
  65	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
  66	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
  67	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
  68	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
  69	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
  70};
  71
  72static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
  73			      enum ib_gid_type gid_type);
  74
  75const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
  76{
  77	size_t index = event;
  78
  79	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
  80			cma_events[index] : "unrecognized event";
  81}
  82EXPORT_SYMBOL(rdma_event_msg);
  83
  84const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
  85						int reason)
  86{
  87	if (rdma_ib_or_roce(id->device, id->port_num))
  88		return ibcm_reject_msg(reason);
  89
  90	if (rdma_protocol_iwarp(id->device, id->port_num))
  91		return iwcm_reject_msg(reason);
  92
  93	WARN_ON_ONCE(1);
  94	return "unrecognized transport";
  95}
  96EXPORT_SYMBOL(rdma_reject_msg);
  97
  98/**
  99 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 100 *                           request.
 101 * @id: Communication identifier that received the REJECT event.
 102 * @reason: Value returned in the REJECT event status field.
 103 */
 104static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
 105{
 106	if (rdma_ib_or_roce(id->device, id->port_num))
 107		return reason == IB_CM_REJ_CONSUMER_DEFINED;
 108
 109	if (rdma_protocol_iwarp(id->device, id->port_num))
 110		return reason == -ECONNREFUSED;
 111
 112	WARN_ON_ONCE(1);
 113	return false;
 114}
 115
 116const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
 117				      struct rdma_cm_event *ev, u8 *data_len)
 118{
 119	const void *p;
 120
 121	if (rdma_is_consumer_reject(id, ev->status)) {
 122		*data_len = ev->param.conn.private_data_len;
 123		p = ev->param.conn.private_data;
 124	} else {
 125		*data_len = 0;
 126		p = NULL;
 127	}
 128	return p;
 129}
 130EXPORT_SYMBOL(rdma_consumer_reject_data);
 131
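/*
 * Illustrative sketch, not part of this file: a ULP event handler can pair
 * rdma_reject_msg() and rdma_consumer_reject_data() to report why a connect
 * request was rejected. The handler name and the pr_debug() call are
 * placeholders invented for the example.
 *
 *	static int ulp_cm_handler(struct rdma_cm_id *id,
 *				  struct rdma_cm_event *event)
 *	{
 *		if (event->event == RDMA_CM_EVENT_REJECTED) {
 *			const void *data;
 *			u8 len;
 *
 *			data = rdma_consumer_reject_data(id, event, &len);
 *			pr_debug("rejected: %s, %u private bytes at %p\n",
 *				 rdma_reject_msg(id, event->status), len, data);
 *		}
 *		return 0;
 *	}
 */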
 132/**
 133 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 134 * @id: Communication Identifier
 135 */
 136struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
 137{
 138	struct rdma_id_private *id_priv;
 139
 140	id_priv = container_of(id, struct rdma_id_private, id);
 141	if (id->device->node_type == RDMA_NODE_RNIC)
 142		return id_priv->cm_id.iw;
 143	return NULL;
 144}
 145EXPORT_SYMBOL(rdma_iw_cm_id);
 146
 147/**
 148 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 149 * @res: rdma resource tracking entry pointer
 150 */
 151struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
 152{
 153	struct rdma_id_private *id_priv =
 154		container_of(res, struct rdma_id_private, res);
 155
 156	return &id_priv->id;
 157}
 158EXPORT_SYMBOL(rdma_res_to_id);
 159
 160static int cma_add_one(struct ib_device *device);
 161static void cma_remove_one(struct ib_device *device, void *client_data);
 162
 163static struct ib_client cma_client = {
 164	.name   = "cma",
 165	.add    = cma_add_one,
 166	.remove = cma_remove_one
 167};
 168
 169static struct ib_sa_client sa_client;
 170static LIST_HEAD(dev_list);
 171static LIST_HEAD(listen_any_list);
 172static DEFINE_MUTEX(lock);
 173static struct rb_root id_table = RB_ROOT;
 174/* Serialize operations of id_table tree */
 175static DEFINE_SPINLOCK(id_table_lock);
 176static struct workqueue_struct *cma_wq;
 177static unsigned int cma_pernet_id;
 178
 179struct cma_pernet {
 180	struct xarray tcp_ps;
 181	struct xarray udp_ps;
 182	struct xarray ipoib_ps;
 183	struct xarray ib_ps;
 184};
 185
 186static struct cma_pernet *cma_pernet(struct net *net)
 187{
 188	return net_generic(net, cma_pernet_id);
 189}
 190
 191static
 192struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
 193{
 194	struct cma_pernet *pernet = cma_pernet(net);
 195
 196	switch (ps) {
 197	case RDMA_PS_TCP:
 198		return &pernet->tcp_ps;
 199	case RDMA_PS_UDP:
 200		return &pernet->udp_ps;
 201	case RDMA_PS_IPOIB:
 202		return &pernet->ipoib_ps;
 203	case RDMA_PS_IB:
 204		return &pernet->ib_ps;
 205	default:
 206		return NULL;
 207	}
 208}
 209
 210struct id_table_entry {
 211	struct list_head id_list;
 212	struct rb_node rb_node;
 213};
 214
 215struct cma_device {
 216	struct list_head	list;
 217	struct ib_device	*device;
 218	struct completion	comp;
 219	refcount_t refcount;
 220	struct list_head	id_list;
 221	enum ib_gid_type	*default_gid_type;
 222	u8			*default_roce_tos;
 223};
 224
 225struct rdma_bind_list {
 226	enum rdma_ucm_port_space ps;
 227	struct hlist_head	owners;
 228	unsigned short		port;
 229};
 230
 231static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
 232			struct rdma_bind_list *bind_list, int snum)
 233{
 234	struct xarray *xa = cma_pernet_xa(net, ps);
 235
 236	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
 237}
 238
 239static struct rdma_bind_list *cma_ps_find(struct net *net,
 240					  enum rdma_ucm_port_space ps, int snum)
 241{
 242	struct xarray *xa = cma_pernet_xa(net, ps);
 243
 244	return xa_load(xa, snum);
 245}
 246
 247static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
 248			  int snum)
 249{
 250	struct xarray *xa = cma_pernet_xa(net, ps);
 251
 252	xa_erase(xa, snum);
 253}
 254
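/*
 * Minimal sketch of how the three helpers above cooperate (illustrative
 * only; port 5000 is an arbitrary example value): binding an id stores its
 * rdma_bind_list in the per-namespace, per-port-space xarray, a lookup on an
 * incoming request finds it again, and releasing the port erases it.
 *
 *	ret = cma_ps_alloc(net, RDMA_PS_TCP, bind_list, 5000);
 *	...
 *	bind_list = cma_ps_find(net, RDMA_PS_TCP, 5000);
 *	...
 *	cma_ps_remove(net, RDMA_PS_TCP, 5000);
 */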
 255enum {
 256	CMA_OPTION_AFONLY,
 257};
 258
 259void cma_dev_get(struct cma_device *cma_dev)
 260{
 261	refcount_inc(&cma_dev->refcount);
 262}
 263
 264void cma_dev_put(struct cma_device *cma_dev)
 265{
 266	if (refcount_dec_and_test(&cma_dev->refcount))
 267		complete(&cma_dev->comp);
 268}
 269
 270struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
 271					     void		*cookie)
 272{
 273	struct cma_device *cma_dev;
 274	struct cma_device *found_cma_dev = NULL;
 275
 276	mutex_lock(&lock);
 277
 278	list_for_each_entry(cma_dev, &dev_list, list)
 279		if (filter(cma_dev->device, cookie)) {
 280			found_cma_dev = cma_dev;
 281			break;
 282		}
 283
 284	if (found_cma_dev)
 285		cma_dev_get(found_cma_dev);
 286	mutex_unlock(&lock);
 287	return found_cma_dev;
 288}
 289
 290int cma_get_default_gid_type(struct cma_device *cma_dev,
 291			     u32 port)
 292{
 293	if (!rdma_is_port_valid(cma_dev->device, port))
 294		return -EINVAL;
 295
 296	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
 297}
 298
 299int cma_set_default_gid_type(struct cma_device *cma_dev,
 300			     u32 port,
 301			     enum ib_gid_type default_gid_type)
 302{
 303	unsigned long supported_gids;
 304
 305	if (!rdma_is_port_valid(cma_dev->device, port))
 306		return -EINVAL;
 307
 308	if (default_gid_type == IB_GID_TYPE_IB &&
 309	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
 310		default_gid_type = IB_GID_TYPE_ROCE;
 311
 312	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
 313
 314	if (!(supported_gids & 1 << default_gid_type))
 315		return -EINVAL;
 316
 317	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
 318		default_gid_type;
 319
 320	return 0;
 321}
 322
 323int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
 324{
 325	if (!rdma_is_port_valid(cma_dev->device, port))
 326		return -EINVAL;
 327
 328	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
 329}
 330
 331int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
 332			     u8 default_roce_tos)
 333{
 334	if (!rdma_is_port_valid(cma_dev->device, port))
 335		return -EINVAL;
 336
 337	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
 338		 default_roce_tos;
 339
 340	return 0;
 341}
 342struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
 343{
 344	return cma_dev->device;
 345}
 346
 347/*
 348 * Device removal can occur at anytime, so we need extra handling to
 349 * serialize notifying the user of device removal with other callbacks.
 350 * We do this by disabling removal notification while a callback is in process,
 351 * and reporting it after the callback completes.
 352 */
 353
 354struct cma_multicast {
 355	struct rdma_id_private *id_priv;
 356	union {
 357		struct ib_sa_multicast *sa_mc;
 358		struct {
 359			struct work_struct work;
 360			struct rdma_cm_event event;
 361		} iboe_join;
 362	};
 363	struct list_head	list;
 364	void			*context;
 365	struct sockaddr_storage	addr;
 366	u8			join_state;
 367};
 368
 369struct cma_work {
 370	struct work_struct	work;
 371	struct rdma_id_private	*id;
 372	enum rdma_cm_state	old_state;
 373	enum rdma_cm_state	new_state;
 374	struct rdma_cm_event	event;
 375};
 376
 377union cma_ip_addr {
 378	struct in6_addr ip6;
 379	struct {
 380		__be32 pad[3];
 381		__be32 addr;
 382	} ip4;
 383};
 384
 385struct cma_hdr {
 386	u8 cma_version;
 387	u8 ip_version;	/* IP version: 7:4 */
 388	__be16 port;
 389	union cma_ip_addr src_addr;
 390	union cma_ip_addr dst_addr;
 391};
 392
 393#define CMA_VERSION 0x00
 394
 395struct cma_req_info {
 396	struct sockaddr_storage listen_addr_storage;
 397	struct sockaddr_storage src_addr_storage;
 398	struct ib_device *device;
 399	union ib_gid local_gid;
 400	__be64 service_id;
 401	int port;
 402	bool has_gid;
 403	u16 pkey;
 404};
 405
 406static int cma_comp_exch(struct rdma_id_private *id_priv,
 407			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 408{
 409	unsigned long flags;
 410	int ret;
 411
 412	/*
 413	 * The FSM uses a funny double locking where state is protected by both
 414	 * the handler_mutex and the spinlock. State is not allowed to change
 415	 * to/from a handler_mutex protected value without also holding
 416	 * handler_mutex.
 417	 */
 418	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
 419		lockdep_assert_held(&id_priv->handler_mutex);
 420
 421	spin_lock_irqsave(&id_priv->lock, flags);
 422	if ((ret = (id_priv->state == comp)))
 423		id_priv->state = exch;
 424	spin_unlock_irqrestore(&id_priv->lock, flags);
 425	return ret;
 426}
 427
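/*
 * Illustrative sketch of how cma_comp_exch() is used (not part of this
 * file): callers advance the FSM only when the id is still in the expected
 * state, e.g. a bind path does roughly:
 *
 *	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 *		return -EINVAL;
 */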
 428static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
 429{
 430	return hdr->ip_version >> 4;
 431}
 432
 433static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
 434{
 435	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
 436}
 437
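/*
 * Worked example for the two helpers above: the IP version is kept in the
 * high nibble of ip_version, so cma_set_ip_ver(hdr, 4) yields
 * ip_version == 0x40 and cma_get_ip_ver() then returns 4; the low nibble is
 * left untouched.
 */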
 438static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
 439{
 440	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
 441}
 442
 443static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
 444{
 445	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
 446}
 447
 448static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
 449{
 450	struct in_device *in_dev = NULL;
 451
 452	if (ndev) {
 453		rtnl_lock();
 454		in_dev = __in_dev_get_rtnl(ndev);
 455		if (in_dev) {
 456			if (join)
 457				ip_mc_inc_group(in_dev,
 458						*(__be32 *)(mgid->raw + 12));
 459			else
 460				ip_mc_dec_group(in_dev,
 461						*(__be32 *)(mgid->raw + 12));
 462		}
 463		rtnl_unlock();
 464	}
 465	return (in_dev) ? 0 : -ENODEV;
 466}
 467
 468static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
 469				 struct id_table_entry *entry_b)
 470{
 471	struct rdma_id_private *id_priv = list_first_entry(
 472		&entry_b->id_list, struct rdma_id_private, id_list_entry);
 473	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
 474	struct sockaddr *sb = cma_dst_addr(id_priv);
 475
 476	if (ifindex_a != ifindex_b)
 477		return (ifindex_a > ifindex_b) ? 1 : -1;
 478
 479	if (sa->sa_family != sb->sa_family)
 480		return sa->sa_family - sb->sa_family;
 481
 482	if (sa->sa_family == AF_INET &&
 483	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
 484		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
 485			      &((struct sockaddr_in *)sb)->sin_addr,
 486			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
 487	}
 488
 489	if (sa->sa_family == AF_INET6 &&
 490	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
 491		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
 492				     &((struct sockaddr_in6 *)sb)->sin6_addr);
 493	}
 494
 495	return -1;
 496}
 497
 498static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
 499{
 500	struct rb_node **new, *parent = NULL;
 501	struct id_table_entry *this, *node;
 502	unsigned long flags;
 503	int result;
 504
 505	node = kzalloc(sizeof(*node), GFP_KERNEL);
 506	if (!node)
 507		return -ENOMEM;
 508
 509	spin_lock_irqsave(&id_table_lock, flags);
 510	new = &id_table.rb_node;
 511	while (*new) {
 512		this = container_of(*new, struct id_table_entry, rb_node);
 513		result = compare_netdev_and_ip(
 514			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
 515			cma_dst_addr(node_id_priv), this);
 516
 517		parent = *new;
 518		if (result < 0)
 519			new = &((*new)->rb_left);
 520		else if (result > 0)
 521			new = &((*new)->rb_right);
 522		else {
 523			list_add_tail(&node_id_priv->id_list_entry,
 524				      &this->id_list);
 525			kfree(node);
 526			goto unlock;
 527		}
 528	}
 529
 530	INIT_LIST_HEAD(&node->id_list);
 531	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
 532
 533	rb_link_node(&node->rb_node, parent, new);
 534	rb_insert_color(&node->rb_node, &id_table);
 535
 536unlock:
 537	spin_unlock_irqrestore(&id_table_lock, flags);
 538	return 0;
 539}
 540
 541static struct id_table_entry *
 542node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
 543{
 544	struct rb_node *node = root->rb_node;
 545	struct id_table_entry *data;
 546	int result;
 547
 548	while (node) {
 549		data = container_of(node, struct id_table_entry, rb_node);
 550		result = compare_netdev_and_ip(ifindex, sa, data);
 551		if (result < 0)
 552			node = node->rb_left;
 553		else if (result > 0)
 554			node = node->rb_right;
 555		else
 556			return data;
 557	}
 558
 559	return NULL;
 560}
 561
 562static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
 563{
 564	struct id_table_entry *data;
 565	unsigned long flags;
 566
 567	spin_lock_irqsave(&id_table_lock, flags);
 568	if (list_empty(&id_priv->id_list_entry))
 569		goto out;
 570
 571	data = node_from_ndev_ip(&id_table,
 572				 id_priv->id.route.addr.dev_addr.bound_dev_if,
 573				 cma_dst_addr(id_priv));
 574	if (!data)
 575		goto out;
 576
 577	list_del_init(&id_priv->id_list_entry);
 578	if (list_empty(&data->id_list)) {
 579		rb_erase(&data->rb_node, &id_table);
 580		kfree(data);
 581	}
 582out:
 583	spin_unlock_irqrestore(&id_table_lock, flags);
 584}
 585
 586static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
 587			       struct cma_device *cma_dev)
 588{
 589	cma_dev_get(cma_dev);
 590	id_priv->cma_dev = cma_dev;
 591	id_priv->id.device = cma_dev->device;
 592	id_priv->id.route.addr.dev_addr.transport =
 593		rdma_node_get_transport(cma_dev->device->node_type);
 594	list_add_tail(&id_priv->device_item, &cma_dev->id_list);
 595
 596	trace_cm_id_attach(id_priv, cma_dev->device);
 597}
 598
 599static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 600			      struct cma_device *cma_dev)
 601{
 602	_cma_attach_to_dev(id_priv, cma_dev);
 603	id_priv->gid_type =
 604		cma_dev->default_gid_type[id_priv->id.port_num -
 605					  rdma_start_port(cma_dev->device)];
 606}
 607
 608static void cma_release_dev(struct rdma_id_private *id_priv)
 609{
 610	mutex_lock(&lock);
 611	list_del_init(&id_priv->device_item);
 612	cma_dev_put(id_priv->cma_dev);
 613	id_priv->cma_dev = NULL;
 614	id_priv->id.device = NULL;
 615	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
 616		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
 617		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
 618	}
 619	mutex_unlock(&lock);
 620}
 621
 622static inline unsigned short cma_family(struct rdma_id_private *id_priv)
 623{
 624	return id_priv->id.route.addr.src_addr.ss_family;
 625}
 626
 627static int cma_set_default_qkey(struct rdma_id_private *id_priv)
 628{
 629	struct ib_sa_mcmember_rec rec;
 630	int ret = 0;
 631
 632	switch (id_priv->id.ps) {
 633	case RDMA_PS_UDP:
 634	case RDMA_PS_IB:
 635		id_priv->qkey = RDMA_UDP_QKEY;
 636		break;
 637	case RDMA_PS_IPOIB:
 638		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
 639		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
 640					     id_priv->id.port_num, &rec.mgid,
 641					     &rec);
 642		if (!ret)
 643			id_priv->qkey = be32_to_cpu(rec.qkey);
 644		break;
 645	default:
 646		break;
 647	}
 648	return ret;
 649}
 650
 651static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
 652{
 653	if (!qkey ||
 654	    (id_priv->qkey && (id_priv->qkey != qkey)))
 655		return -EINVAL;
 656
 657	id_priv->qkey = qkey;
 658	return 0;
 659}
 660
 661static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
 662{
 663	dev_addr->dev_type = ARPHRD_INFINIBAND;
 664	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
 665	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
 666}
 667
 668static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 669{
 670	int ret;
 671
 672	if (addr->sa_family != AF_IB) {
 673		ret = rdma_translate_ip(addr, dev_addr);
 674	} else {
 675		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
 676		ret = 0;
 677	}
 678
 679	return ret;
 680}
 681
 682static const struct ib_gid_attr *
 683cma_validate_port(struct ib_device *device, u32 port,
 684		  enum ib_gid_type gid_type,
 685		  union ib_gid *gid,
 686		  struct rdma_id_private *id_priv)
 687{
 688	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 689	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
 690	int bound_if_index = dev_addr->bound_dev_if;
 691	int dev_type = dev_addr->dev_type;
 692	struct net_device *ndev = NULL;
 693
 694	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
 695		goto out;
 696
 697	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
 698		goto out;
 699
 700	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
 701		goto out;
 702
 703	/*
 704	 * For drivers that do not associate more than one net device with
 705	 * their gid tables, such as iWARP drivers, it is sufficient to
 706	 * return the first table entry.
 707	 *
 708	 * Other driver classes might be included in the future.
 709	 */
 710	if (rdma_protocol_iwarp(device, port)) {
 711		sgid_attr = rdma_get_gid_attr(device, port, 0);
 712		if (IS_ERR(sgid_attr))
 713			goto out;
 714
 715		rcu_read_lock();
  716		ndev = rcu_dereference(sgid_attr->ndev);
  717		if (!net_eq(dev_net(ndev), dev_addr->net) ||
  718		    ndev->ifindex != bound_if_index)
  719			sgid_attr = ERR_PTR(-ENODEV);
  720		rcu_read_unlock();
 721		goto out;
 722	}
 723
 724	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
 725		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
 726		if (!ndev)
 727			goto out;
 728	} else {
 729		gid_type = IB_GID_TYPE_IB;
 730	}
 731
 732	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
 733	dev_put(ndev);
 734out:
 735	return sgid_attr;
 736}
 737
 738static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
 739			       const struct ib_gid_attr *sgid_attr)
 740{
 741	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
 742	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
 743}
 744
 745/**
 746 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 747 * based on source ip address.
 748 * @id_priv:	cm_id which should be bound to cma device
 749 *
 750 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 751 * based on source IP address. It returns 0 on success or error code otherwise.
 752 * It is applicable to active and passive side cm_id.
 753 */
 754static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
 755{
 756	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 757	const struct ib_gid_attr *sgid_attr;
 758	union ib_gid gid, iboe_gid, *gidp;
 759	struct cma_device *cma_dev;
 760	enum ib_gid_type gid_type;
 761	int ret = -ENODEV;
 762	u32 port;
 763
 764	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 765	    id_priv->id.ps == RDMA_PS_IPOIB)
 766		return -EINVAL;
 767
 768	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
 769		    &iboe_gid);
 770
 771	memcpy(&gid, dev_addr->src_dev_addr +
 772	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 773
 774	mutex_lock(&lock);
 775	list_for_each_entry(cma_dev, &dev_list, list) {
 776		rdma_for_each_port (cma_dev->device, port) {
 777			gidp = rdma_protocol_roce(cma_dev->device, port) ?
 778			       &iboe_gid : &gid;
 779			gid_type = cma_dev->default_gid_type[port - 1];
 780			sgid_attr = cma_validate_port(cma_dev->device, port,
 781						      gid_type, gidp, id_priv);
 782			if (!IS_ERR(sgid_attr)) {
 783				id_priv->id.port_num = port;
 784				cma_bind_sgid_attr(id_priv, sgid_attr);
 785				cma_attach_to_dev(id_priv, cma_dev);
 786				ret = 0;
 787				goto out;
 788			}
 789		}
 790	}
 791out:
 792	mutex_unlock(&lock);
 793	return ret;
 794}
 795
 796/**
 797 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 798 * @id_priv:		cm id to bind to cma device
 799 * @listen_id_priv:	listener cm id to match against
  800 * @req:		Pointer to req structure containing incoming
 801 *			request information
 802 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 803 * rdma device matches for listen_id and incoming request. It also verifies
 804 * that a GID table entry is present for the source address.
 805 * Returns 0 on success, or returns error code otherwise.
 806 */
 807static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
 808			      const struct rdma_id_private *listen_id_priv,
 809			      struct cma_req_info *req)
 810{
 811	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 812	const struct ib_gid_attr *sgid_attr;
 813	enum ib_gid_type gid_type;
 814	union ib_gid gid;
 815
 816	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 817	    id_priv->id.ps == RDMA_PS_IPOIB)
 818		return -EINVAL;
 819
 820	if (rdma_protocol_roce(req->device, req->port))
 821		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
 822			    &gid);
 823	else
 824		memcpy(&gid, dev_addr->src_dev_addr +
 825		       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 826
 827	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
 828	sgid_attr = cma_validate_port(req->device, req->port,
 829				      gid_type, &gid, id_priv);
 830	if (IS_ERR(sgid_attr))
 831		return PTR_ERR(sgid_attr);
 832
 833	id_priv->id.port_num = req->port;
 834	cma_bind_sgid_attr(id_priv, sgid_attr);
 835	/* Need to acquire lock to protect against reader
 836	 * of cma_dev->id_list such as cma_netdev_callback() and
 837	 * cma_process_remove().
 838	 */
 839	mutex_lock(&lock);
 840	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
 841	mutex_unlock(&lock);
 842	rdma_restrack_add(&id_priv->res);
 843	return 0;
 844}
 845
 846static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
 847			      const struct rdma_id_private *listen_id_priv)
 848{
 849	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 850	const struct ib_gid_attr *sgid_attr;
 851	struct cma_device *cma_dev;
 852	enum ib_gid_type gid_type;
 853	int ret = -ENODEV;
 854	union ib_gid gid;
 855	u32 port;
 856
 857	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 858	    id_priv->id.ps == RDMA_PS_IPOIB)
 859		return -EINVAL;
 860
 861	memcpy(&gid, dev_addr->src_dev_addr +
 862	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 863
 864	mutex_lock(&lock);
 865
 866	cma_dev = listen_id_priv->cma_dev;
 867	port = listen_id_priv->id.port_num;
 868	gid_type = listen_id_priv->gid_type;
 869	sgid_attr = cma_validate_port(cma_dev->device, port,
 870				      gid_type, &gid, id_priv);
 871	if (!IS_ERR(sgid_attr)) {
 872		id_priv->id.port_num = port;
 873		cma_bind_sgid_attr(id_priv, sgid_attr);
 874		ret = 0;
 875		goto out;
 876	}
 877
 878	list_for_each_entry(cma_dev, &dev_list, list) {
 879		rdma_for_each_port (cma_dev->device, port) {
 880			if (listen_id_priv->cma_dev == cma_dev &&
 881			    listen_id_priv->id.port_num == port)
 882				continue;
 883
 884			gid_type = cma_dev->default_gid_type[port - 1];
 885			sgid_attr = cma_validate_port(cma_dev->device, port,
 886						      gid_type, &gid, id_priv);
 887			if (!IS_ERR(sgid_attr)) {
 888				id_priv->id.port_num = port;
 889				cma_bind_sgid_attr(id_priv, sgid_attr);
 890				ret = 0;
 891				goto out;
 892			}
 893		}
 894	}
 895
 896out:
 897	if (!ret) {
 898		cma_attach_to_dev(id_priv, cma_dev);
 899		rdma_restrack_add(&id_priv->res);
 900	}
 901
 902	mutex_unlock(&lock);
 903	return ret;
 904}
 905
 906/*
 907 * Select the source IB device and address to reach the destination IB address.
 908 */
 909static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 910{
 911	struct cma_device *cma_dev, *cur_dev;
 912	struct sockaddr_ib *addr;
 913	union ib_gid gid, sgid, *dgid;
 914	unsigned int p;
 915	u16 pkey, index;
 916	enum ib_port_state port_state;
 917	int ret;
 918	int i;
 919
 920	cma_dev = NULL;
 921	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
 922	dgid = (union ib_gid *) &addr->sib_addr;
 923	pkey = ntohs(addr->sib_pkey);
 924
 925	mutex_lock(&lock);
 926	list_for_each_entry(cur_dev, &dev_list, list) {
 927		rdma_for_each_port (cur_dev->device, p) {
 928			if (!rdma_cap_af_ib(cur_dev->device, p))
 929				continue;
 930
 931			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
 932				continue;
 933
 934			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
 935				continue;
 936
 937			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
 938			     ++i) {
 939				ret = rdma_query_gid(cur_dev->device, p, i,
 940						     &gid);
 941				if (ret)
 942					continue;
 943
 944				if (!memcmp(&gid, dgid, sizeof(gid))) {
 945					cma_dev = cur_dev;
 946					sgid = gid;
 947					id_priv->id.port_num = p;
 948					goto found;
 949				}
 950
 951				if (!cma_dev && (gid.global.subnet_prefix ==
 952				    dgid->global.subnet_prefix) &&
 953				    port_state == IB_PORT_ACTIVE) {
 954					cma_dev = cur_dev;
 955					sgid = gid;
 956					id_priv->id.port_num = p;
 957					goto found;
 958				}
 959			}
 960		}
 961	}
 962	mutex_unlock(&lock);
 963	return -ENODEV;
 964
 965found:
 966	cma_attach_to_dev(id_priv, cma_dev);
 967	rdma_restrack_add(&id_priv->res);
 968	mutex_unlock(&lock);
 969	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
 970	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
 971	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
 972	return 0;
 973}
 974
 975static void cma_id_get(struct rdma_id_private *id_priv)
 976{
 977	refcount_inc(&id_priv->refcount);
 978}
 979
 980static void cma_id_put(struct rdma_id_private *id_priv)
 981{
 982	if (refcount_dec_and_test(&id_priv->refcount))
 983		complete(&id_priv->comp);
 984}
 985
 986static struct rdma_id_private *
 987__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
 988		 void *context, enum rdma_ucm_port_space ps,
 989		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
 990{
 991	struct rdma_id_private *id_priv;
 992
 993	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
 994	if (!id_priv)
 995		return ERR_PTR(-ENOMEM);
 996
 997	id_priv->state = RDMA_CM_IDLE;
 998	id_priv->id.context = context;
 999	id_priv->id.event_handler = event_handler;
1000	id_priv->id.ps = ps;
1001	id_priv->id.qp_type = qp_type;
1002	id_priv->tos_set = false;
1003	id_priv->timeout_set = false;
1004	id_priv->min_rnr_timer_set = false;
1005	id_priv->gid_type = IB_GID_TYPE_IB;
1006	spin_lock_init(&id_priv->lock);
1007	mutex_init(&id_priv->qp_mutex);
1008	init_completion(&id_priv->comp);
1009	refcount_set(&id_priv->refcount, 1);
1010	mutex_init(&id_priv->handler_mutex);
1011	INIT_LIST_HEAD(&id_priv->device_item);
1012	INIT_LIST_HEAD(&id_priv->id_list_entry);
1013	INIT_LIST_HEAD(&id_priv->listen_list);
1014	INIT_LIST_HEAD(&id_priv->mc_list);
1015	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
1016	id_priv->id.route.addr.dev_addr.net = get_net(net);
1017	id_priv->seq_num &= 0x00ffffff;
1018
1019	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
1020	if (parent)
1021		rdma_restrack_parent_name(&id_priv->res, &parent->res);
1022
1023	return id_priv;
1024}
1025
1026struct rdma_cm_id *
1027__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
1028			void *context, enum rdma_ucm_port_space ps,
1029			enum ib_qp_type qp_type, const char *caller)
1030{
1031	struct rdma_id_private *ret;
1032
1033	ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
1034	if (IS_ERR(ret))
1035		return ERR_CAST(ret);
1036
1037	rdma_restrack_set_name(&ret->res, caller);
1038	return &ret->id;
1039}
1040EXPORT_SYMBOL(__rdma_create_kernel_id);
1041
1042struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
1043				       void *context,
1044				       enum rdma_ucm_port_space ps,
1045				       enum ib_qp_type qp_type)
1046{
1047	struct rdma_id_private *ret;
1048
1049	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
1050			       ps, qp_type, NULL);
1051	if (IS_ERR(ret))
1052		return ERR_CAST(ret);
1053
1054	rdma_restrack_set_name(&ret->res, NULL);
1055	return &ret->id;
1056}
1057EXPORT_SYMBOL(rdma_create_user_id);
1058
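/*
 * Illustrative sketch, not part of this file: in-kernel ULPs normally reach
 * __rdma_create_kernel_id() through the rdma_create_id() wrapper declared in
 * <rdma/rdma_cm.h>, which passes KBUILD_MODNAME as the restrack name. The
 * handler and context below are placeholders.
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(&init_net, ulp_cm_handler, ulp_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 */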
1059static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1060{
1061	struct ib_qp_attr qp_attr;
1062	int qp_attr_mask, ret;
1063
1064	qp_attr.qp_state = IB_QPS_INIT;
1065	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1066	if (ret)
1067		return ret;
1068
1069	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1070	if (ret)
1071		return ret;
1072
1073	qp_attr.qp_state = IB_QPS_RTR;
1074	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
1075	if (ret)
1076		return ret;
1077
1078	qp_attr.qp_state = IB_QPS_RTS;
1079	qp_attr.sq_psn = 0;
1080	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
1081
1082	return ret;
1083}
1084
1085static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1086{
1087	struct ib_qp_attr qp_attr;
1088	int qp_attr_mask, ret;
1089
1090	qp_attr.qp_state = IB_QPS_INIT;
1091	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1092	if (ret)
1093		return ret;
1094
1095	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1096}
1097
1098int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
1099		   struct ib_qp_init_attr *qp_init_attr)
1100{
1101	struct rdma_id_private *id_priv;
1102	struct ib_qp *qp;
1103	int ret;
1104
1105	id_priv = container_of(id, struct rdma_id_private, id);
1106	if (id->device != pd->device) {
1107		ret = -EINVAL;
1108		goto out_err;
1109	}
1110
1111	qp_init_attr->port_num = id->port_num;
1112	qp = ib_create_qp(pd, qp_init_attr);
1113	if (IS_ERR(qp)) {
1114		ret = PTR_ERR(qp);
1115		goto out_err;
1116	}
1117
1118	if (id->qp_type == IB_QPT_UD)
1119		ret = cma_init_ud_qp(id_priv, qp);
1120	else
1121		ret = cma_init_conn_qp(id_priv, qp);
1122	if (ret)
1123		goto out_destroy;
1124
1125	id->qp = qp;
1126	id_priv->qp_num = qp->qp_num;
1127	id_priv->srq = (qp->srq != NULL);
1128	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
1129	return 0;
1130out_destroy:
1131	ib_destroy_qp(qp);
1132out_err:
1133	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
1134	return ret;
1135}
1136EXPORT_SYMBOL(rdma_create_qp);
1137
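/*
 * Illustrative sketch, not part of this file: after route resolution a ULP
 * typically fills an ib_qp_init_attr and lets rdma_create_qp() attach the
 * QP to the cm_id. The CQ, queue depths and event handler are placeholders.
 *
 *	struct ib_qp_init_attr attr = {
 *		.event_handler	= ulp_qp_event,
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.cap		= { .max_send_wr = 16, .max_recv_wr = 16,
 *				    .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *
 *	ret = rdma_create_qp(id, pd, &attr);
 */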
1138void rdma_destroy_qp(struct rdma_cm_id *id)
1139{
1140	struct rdma_id_private *id_priv;
1141
1142	id_priv = container_of(id, struct rdma_id_private, id);
1143	trace_cm_qp_destroy(id_priv);
1144	mutex_lock(&id_priv->qp_mutex);
1145	ib_destroy_qp(id_priv->id.qp);
1146	id_priv->id.qp = NULL;
1147	mutex_unlock(&id_priv->qp_mutex);
1148}
1149EXPORT_SYMBOL(rdma_destroy_qp);
1150
1151static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
1152			     struct rdma_conn_param *conn_param)
1153{
1154	struct ib_qp_attr qp_attr;
1155	int qp_attr_mask, ret;
1156
1157	mutex_lock(&id_priv->qp_mutex);
1158	if (!id_priv->id.qp) {
1159		ret = 0;
1160		goto out;
1161	}
1162
1163	/* Need to update QP attributes from default values. */
1164	qp_attr.qp_state = IB_QPS_INIT;
1165	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1166	if (ret)
1167		goto out;
1168
1169	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1170	if (ret)
1171		goto out;
1172
1173	qp_attr.qp_state = IB_QPS_RTR;
1174	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1175	if (ret)
1176		goto out;
1177
1178	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1179
1180	if (conn_param)
1181		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
1182	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1183out:
1184	mutex_unlock(&id_priv->qp_mutex);
1185	return ret;
1186}
1187
1188static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
1189			     struct rdma_conn_param *conn_param)
1190{
1191	struct ib_qp_attr qp_attr;
1192	int qp_attr_mask, ret;
1193
1194	mutex_lock(&id_priv->qp_mutex);
1195	if (!id_priv->id.qp) {
1196		ret = 0;
1197		goto out;
1198	}
1199
1200	qp_attr.qp_state = IB_QPS_RTS;
1201	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1202	if (ret)
1203		goto out;
1204
1205	if (conn_param)
1206		qp_attr.max_rd_atomic = conn_param->initiator_depth;
1207	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1208out:
1209	mutex_unlock(&id_priv->qp_mutex);
1210	return ret;
1211}
1212
1213static int cma_modify_qp_err(struct rdma_id_private *id_priv)
1214{
1215	struct ib_qp_attr qp_attr;
1216	int ret;
1217
1218	mutex_lock(&id_priv->qp_mutex);
1219	if (!id_priv->id.qp) {
1220		ret = 0;
1221		goto out;
1222	}
1223
1224	qp_attr.qp_state = IB_QPS_ERR;
1225	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
1226out:
1227	mutex_unlock(&id_priv->qp_mutex);
1228	return ret;
1229}
1230
1231static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
1232			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1233{
1234	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
1235	int ret;
1236	u16 pkey;
1237
1238	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
1239		pkey = 0xffff;
1240	else
1241		pkey = ib_addr_get_pkey(dev_addr);
1242
1243	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
1244				  pkey, &qp_attr->pkey_index);
1245	if (ret)
1246		return ret;
1247
1248	qp_attr->port_num = id_priv->id.port_num;
1249	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
1250
1251	if (id_priv->id.qp_type == IB_QPT_UD) {
1252		ret = cma_set_default_qkey(id_priv);
1253		if (ret)
1254			return ret;
1255
1256		qp_attr->qkey = id_priv->qkey;
1257		*qp_attr_mask |= IB_QP_QKEY;
1258	} else {
1259		qp_attr->qp_access_flags = 0;
1260		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
1261	}
1262	return 0;
1263}
1264
1265int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
1266		       int *qp_attr_mask)
1267{
1268	struct rdma_id_private *id_priv;
1269	int ret = 0;
1270
1271	id_priv = container_of(id, struct rdma_id_private, id);
1272	if (rdma_cap_ib_cm(id->device, id->port_num)) {
1273		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
1274			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
1275		else
1276			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
1277						 qp_attr_mask);
1278
1279		if (qp_attr->qp_state == IB_QPS_RTR)
1280			qp_attr->rq_psn = id_priv->seq_num;
1281	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1282		if (!id_priv->cm_id.iw) {
1283			qp_attr->qp_access_flags = 0;
1284			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1285		} else
1286			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
1287						 qp_attr_mask);
1288		qp_attr->port_num = id_priv->id.port_num;
1289		*qp_attr_mask |= IB_QP_PORT;
1290	} else {
1291		ret = -ENOSYS;
1292	}
1293
1294	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
1295		qp_attr->timeout = id_priv->timeout;
1296
1297	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
1298		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
1299
1300	return ret;
1301}
1302EXPORT_SYMBOL(rdma_init_qp_attr);
1303
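/*
 * Illustrative sketch, not part of this file: a ULP that creates and owns
 * its QP instead of using rdma_create_qp() can still let the CM compute the
 * attributes for each transition:
 *
 *	struct ib_qp_attr attr;
 *	int mask, ret;
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	ret = rdma_init_qp_attr(id, &attr, &mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &attr, mask);
 */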
1304static inline bool cma_zero_addr(const struct sockaddr *addr)
1305{
1306	switch (addr->sa_family) {
1307	case AF_INET:
1308		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
1309	case AF_INET6:
1310		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
1311	case AF_IB:
1312		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
1313	default:
1314		return false;
1315	}
1316}
1317
1318static inline bool cma_loopback_addr(const struct sockaddr *addr)
1319{
1320	switch (addr->sa_family) {
1321	case AF_INET:
1322		return ipv4_is_loopback(
1323			((struct sockaddr_in *)addr)->sin_addr.s_addr);
1324	case AF_INET6:
1325		return ipv6_addr_loopback(
1326			&((struct sockaddr_in6 *)addr)->sin6_addr);
1327	case AF_IB:
1328		return ib_addr_loopback(
1329			&((struct sockaddr_ib *)addr)->sib_addr);
1330	default:
1331		return false;
1332	}
1333}
1334
1335static inline bool cma_any_addr(const struct sockaddr *addr)
1336{
1337	return cma_zero_addr(addr) || cma_loopback_addr(addr);
1338}
1339
1340static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
1341{
1342	if (src->sa_family != dst->sa_family)
1343		return -1;
1344
1345	switch (src->sa_family) {
1346	case AF_INET:
1347		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
1348		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
1349	case AF_INET6: {
1350		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
1351		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
1352		bool link_local;
1353
1354		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
1355					  &dst_addr6->sin6_addr))
1356			return 1;
1357		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
1358			     IPV6_ADDR_LINKLOCAL;
1359		/* Link local must match their scope_ids */
1360		return link_local ? (src_addr6->sin6_scope_id !=
1361				     dst_addr6->sin6_scope_id) :
1362				    0;
1363	}
1364
1365	default:
1366		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
1367				   &((struct sockaddr_ib *) dst)->sib_addr);
1368	}
1369}
1370
1371static __be16 cma_port(const struct sockaddr *addr)
1372{
1373	struct sockaddr_ib *sib;
1374
1375	switch (addr->sa_family) {
1376	case AF_INET:
1377		return ((struct sockaddr_in *) addr)->sin_port;
1378	case AF_INET6:
1379		return ((struct sockaddr_in6 *) addr)->sin6_port;
1380	case AF_IB:
1381		sib = (struct sockaddr_ib *) addr;
1382		return htons((u16) (be64_to_cpu(sib->sib_sid) &
1383				    be64_to_cpu(sib->sib_sid_mask)));
1384	default:
1385		return 0;
1386	}
1387}
1388
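/*
 * Note on the AF_IB case above: with an all-ones sib_sid_mask the port is
 * simply the low 16 bits of the service ID, so, for example, a service ID
 * whose low word is 4791 makes cma_port() return htons(4791).
 */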
1389static inline int cma_any_port(const struct sockaddr *addr)
1390{
1391	return !cma_port(addr);
1392}
1393
1394static void cma_save_ib_info(struct sockaddr *src_addr,
1395			     struct sockaddr *dst_addr,
1396			     const struct rdma_cm_id *listen_id,
1397			     const struct sa_path_rec *path)
1398{
1399	struct sockaddr_ib *listen_ib, *ib;
1400
1401	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
1402	if (src_addr) {
1403		ib = (struct sockaddr_ib *)src_addr;
1404		ib->sib_family = AF_IB;
1405		if (path) {
1406			ib->sib_pkey = path->pkey;
1407			ib->sib_flowinfo = path->flow_label;
1408			memcpy(&ib->sib_addr, &path->sgid, 16);
1409			ib->sib_sid = path->service_id;
1410			ib->sib_scope_id = 0;
1411		} else {
1412			ib->sib_pkey = listen_ib->sib_pkey;
1413			ib->sib_flowinfo = listen_ib->sib_flowinfo;
1414			ib->sib_addr = listen_ib->sib_addr;
1415			ib->sib_sid = listen_ib->sib_sid;
1416			ib->sib_scope_id = listen_ib->sib_scope_id;
1417		}
1418		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
1419	}
1420	if (dst_addr) {
1421		ib = (struct sockaddr_ib *)dst_addr;
1422		ib->sib_family = AF_IB;
1423		if (path) {
1424			ib->sib_pkey = path->pkey;
1425			ib->sib_flowinfo = path->flow_label;
1426			memcpy(&ib->sib_addr, &path->dgid, 16);
1427		}
1428	}
1429}
1430
1431static void cma_save_ip4_info(struct sockaddr_in *src_addr,
1432			      struct sockaddr_in *dst_addr,
1433			      struct cma_hdr *hdr,
1434			      __be16 local_port)
1435{
1436	if (src_addr) {
1437		*src_addr = (struct sockaddr_in) {
1438			.sin_family = AF_INET,
1439			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
1440			.sin_port = local_port,
1441		};
1442	}
1443
1444	if (dst_addr) {
1445		*dst_addr = (struct sockaddr_in) {
1446			.sin_family = AF_INET,
1447			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
1448			.sin_port = hdr->port,
1449		};
1450	}
1451}
1452
1453static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
1454			      struct sockaddr_in6 *dst_addr,
1455			      struct cma_hdr *hdr,
1456			      __be16 local_port)
1457{
1458	if (src_addr) {
1459		*src_addr = (struct sockaddr_in6) {
1460			.sin6_family = AF_INET6,
1461			.sin6_addr = hdr->dst_addr.ip6,
1462			.sin6_port = local_port,
1463		};
1464	}
1465
1466	if (dst_addr) {
1467		*dst_addr = (struct sockaddr_in6) {
1468			.sin6_family = AF_INET6,
1469			.sin6_addr = hdr->src_addr.ip6,
1470			.sin6_port = hdr->port,
1471		};
1472	}
1473}
1474
1475static u16 cma_port_from_service_id(__be64 service_id)
1476{
1477	return (u16)be64_to_cpu(service_id);
1478}
1479
1480static int cma_save_ip_info(struct sockaddr *src_addr,
1481			    struct sockaddr *dst_addr,
1482			    const struct ib_cm_event *ib_event,
1483			    __be64 service_id)
1484{
1485	struct cma_hdr *hdr;
1486	__be16 port;
1487
1488	hdr = ib_event->private_data;
1489	if (hdr->cma_version != CMA_VERSION)
1490		return -EINVAL;
1491
1492	port = htons(cma_port_from_service_id(service_id));
1493
1494	switch (cma_get_ip_ver(hdr)) {
1495	case 4:
1496		cma_save_ip4_info((struct sockaddr_in *)src_addr,
1497				  (struct sockaddr_in *)dst_addr, hdr, port);
1498		break;
1499	case 6:
1500		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
1501				  (struct sockaddr_in6 *)dst_addr, hdr, port);
1502		break;
1503	default:
1504		return -EAFNOSUPPORT;
1505	}
1506
1507	return 0;
1508}
1509
1510static int cma_save_net_info(struct sockaddr *src_addr,
1511			     struct sockaddr *dst_addr,
1512			     const struct rdma_cm_id *listen_id,
1513			     const struct ib_cm_event *ib_event,
1514			     sa_family_t sa_family, __be64 service_id)
1515{
1516	if (sa_family == AF_IB) {
1517		if (ib_event->event == IB_CM_REQ_RECEIVED)
1518			cma_save_ib_info(src_addr, dst_addr, listen_id,
1519					 ib_event->param.req_rcvd.primary_path);
1520		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1521			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
1522		return 0;
1523	}
1524
1525	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
1526}
1527
1528static int cma_save_req_info(const struct ib_cm_event *ib_event,
1529			     struct cma_req_info *req)
1530{
1531	const struct ib_cm_req_event_param *req_param =
1532		&ib_event->param.req_rcvd;
1533	const struct ib_cm_sidr_req_event_param *sidr_param =
1534		&ib_event->param.sidr_req_rcvd;
1535
1536	switch (ib_event->event) {
1537	case IB_CM_REQ_RECEIVED:
1538		req->device	= req_param->listen_id->device;
1539		req->port	= req_param->port;
1540		memcpy(&req->local_gid, &req_param->primary_path->sgid,
1541		       sizeof(req->local_gid));
1542		req->has_gid	= true;
1543		req->service_id = req_param->primary_path->service_id;
1544		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
1545		if (req->pkey != req_param->bth_pkey)
1546			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
1547					    "RDMA CMA: in the future this may cause the request to be dropped\n",
1548					    req_param->bth_pkey, req->pkey);
1549		break;
1550	case IB_CM_SIDR_REQ_RECEIVED:
1551		req->device	= sidr_param->listen_id->device;
1552		req->port	= sidr_param->port;
1553		req->has_gid	= false;
1554		req->service_id	= sidr_param->service_id;
1555		req->pkey	= sidr_param->pkey;
1556		if (req->pkey != sidr_param->bth_pkey)
1557			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
1558					    "RDMA CMA: in the future this may cause the request to be dropped\n",
1559					    sidr_param->bth_pkey, req->pkey);
1560		break;
1561	default:
1562		return -EINVAL;
1563	}
1564
1565	return 0;
1566}
1567
1568static bool validate_ipv4_net_dev(struct net_device *net_dev,
1569				  const struct sockaddr_in *dst_addr,
1570				  const struct sockaddr_in *src_addr)
1571{
1572	__be32 daddr = dst_addr->sin_addr.s_addr,
1573	       saddr = src_addr->sin_addr.s_addr;
1574	struct fib_result res;
1575	struct flowi4 fl4;
1576	int err;
1577	bool ret;
1578
1579	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1580	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
1581	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
1582	    ipv4_is_loopback(saddr))
1583		return false;
1584
1585	memset(&fl4, 0, sizeof(fl4));
1586	fl4.flowi4_oif = net_dev->ifindex;
1587	fl4.daddr = daddr;
1588	fl4.saddr = saddr;
1589
1590	rcu_read_lock();
1591	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1592	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1593	rcu_read_unlock();
1594
1595	return ret;
1596}
1597
1598static bool validate_ipv6_net_dev(struct net_device *net_dev,
1599				  const struct sockaddr_in6 *dst_addr,
1600				  const struct sockaddr_in6 *src_addr)
1601{
1602#if IS_ENABLED(CONFIG_IPV6)
1603	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
1604			   IPV6_ADDR_LINKLOCAL;
1605	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
1606					 &src_addr->sin6_addr, net_dev->ifindex,
1607					 NULL, strict);
1608	bool ret;
1609
1610	if (!rt)
1611		return false;
1612
1613	ret = rt->rt6i_idev->dev == net_dev;
1614	ip6_rt_put(rt);
1615
1616	return ret;
1617#else
1618	return false;
1619#endif
1620}
1621
1622static bool validate_net_dev(struct net_device *net_dev,
1623			     const struct sockaddr *daddr,
1624			     const struct sockaddr *saddr)
1625{
1626	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
1627	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
1628	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1629	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
1630
1631	switch (daddr->sa_family) {
1632	case AF_INET:
1633		return saddr->sa_family == AF_INET &&
1634		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1635
1636	case AF_INET6:
1637		return saddr->sa_family == AF_INET6 &&
1638		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1639
1640	default:
1641		return false;
1642	}
1643}
1644
1645static struct net_device *
1646roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
1647{
1648	const struct ib_gid_attr *sgid_attr = NULL;
1649	struct net_device *ndev;
1650
1651	if (ib_event->event == IB_CM_REQ_RECEIVED)
1652		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
1653	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1654		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
1655
1656	if (!sgid_attr)
1657		return NULL;
1658
1659	rcu_read_lock();
1660	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
1661	if (IS_ERR(ndev))
1662		ndev = NULL;
1663	else
1664		dev_hold(ndev);
1665	rcu_read_unlock();
1666	return ndev;
1667}
1668
1669static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
1670					  struct cma_req_info *req)
1671{
1672	struct sockaddr *listen_addr =
1673			(struct sockaddr *)&req->listen_addr_storage;
1674	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
1675	struct net_device *net_dev;
1676	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
1677	int err;
1678
1679	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
1680			       req->service_id);
1681	if (err)
1682		return ERR_PTR(err);
1683
1684	if (rdma_protocol_roce(req->device, req->port))
1685		net_dev = roce_get_net_dev_by_cm_event(ib_event);
1686	else
1687		net_dev = ib_get_net_dev_by_params(req->device, req->port,
1688						   req->pkey,
1689						   gid, listen_addr);
1690	if (!net_dev)
1691		return ERR_PTR(-ENODEV);
1692
1693	return net_dev;
1694}
1695
1696static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
1697{
1698	return (be64_to_cpu(service_id) >> 16) & 0xffff;
1699}
1700
1701static bool cma_match_private_data(struct rdma_id_private *id_priv,
1702				   const struct cma_hdr *hdr)
1703{
1704	struct sockaddr *addr = cma_src_addr(id_priv);
1705	__be32 ip4_addr;
1706	struct in6_addr ip6_addr;
1707
1708	if (cma_any_addr(addr) && !id_priv->afonly)
1709		return true;
1710
1711	switch (addr->sa_family) {
1712	case AF_INET:
1713		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
1714		if (cma_get_ip_ver(hdr) != 4)
1715			return false;
1716		if (!cma_any_addr(addr) &&
1717		    hdr->dst_addr.ip4.addr != ip4_addr)
1718			return false;
1719		break;
1720	case AF_INET6:
1721		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
1722		if (cma_get_ip_ver(hdr) != 6)
1723			return false;
1724		if (!cma_any_addr(addr) &&
1725		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
1726			return false;
1727		break;
1728	case AF_IB:
1729		return true;
1730	default:
1731		return false;
1732	}
1733
1734	return true;
1735}
1736
1737static bool cma_protocol_roce(const struct rdma_cm_id *id)
1738{
1739	struct ib_device *device = id->device;
1740	const u32 port_num = id->port_num ?: rdma_start_port(device);
1741
1742	return rdma_protocol_roce(device, port_num);
1743}
1744
1745static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
1746{
1747	const struct sockaddr *daddr =
1748			(const struct sockaddr *)&req->listen_addr_storage;
1749	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1750
1751	/* Returns true if the req is for IPv6 link local */
1752	return (daddr->sa_family == AF_INET6 &&
1753		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
1754}
1755
1756static bool cma_match_net_dev(const struct rdma_cm_id *id,
1757			      const struct net_device *net_dev,
1758			      const struct cma_req_info *req)
1759{
1760	const struct rdma_addr *addr = &id->route.addr;
1761
1762	if (!net_dev)
1763		/* This request is an AF_IB request */
1764		return (!id->port_num || id->port_num == req->port) &&
1765		       (addr->src_addr.ss_family == AF_IB);
1766
1767	/*
 1768	 * If the request is not for an IPv6 link-local address, allow matching
 1769	 * the request to any netdevice of the single- or multi-port rdma device.
1770	 */
1771	if (!cma_is_req_ipv6_ll(req))
1772		return true;
1773	/*
 1774	 * Net namespaces must match, and if the listener is listening
 1775	 * on a specific netdevice then that netdevice must match as well.
1776	 */
1777	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1778	    (!!addr->dev_addr.bound_dev_if ==
1779	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1780		return true;
1781	else
1782		return false;
1783}
1784
1785static struct rdma_id_private *cma_find_listener(
1786		const struct rdma_bind_list *bind_list,
1787		const struct ib_cm_id *cm_id,
1788		const struct ib_cm_event *ib_event,
1789		const struct cma_req_info *req,
1790		const struct net_device *net_dev)
1791{
1792	struct rdma_id_private *id_priv, *id_priv_dev;
1793
1794	lockdep_assert_held(&lock);
1795
1796	if (!bind_list)
1797		return ERR_PTR(-EINVAL);
1798
1799	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1800		if (cma_match_private_data(id_priv, ib_event->private_data)) {
1801			if (id_priv->id.device == cm_id->device &&
1802			    cma_match_net_dev(&id_priv->id, net_dev, req))
1803				return id_priv;
1804			list_for_each_entry(id_priv_dev,
1805					    &id_priv->listen_list,
1806					    listen_item) {
1807				if (id_priv_dev->id.device == cm_id->device &&
1808				    cma_match_net_dev(&id_priv_dev->id,
1809						      net_dev, req))
1810					return id_priv_dev;
1811			}
1812		}
1813	}
1814
1815	return ERR_PTR(-EINVAL);
1816}
1817
1818static struct rdma_id_private *
1819cma_ib_id_from_event(struct ib_cm_id *cm_id,
1820		     const struct ib_cm_event *ib_event,
1821		     struct cma_req_info *req,
1822		     struct net_device **net_dev)
1823{
1824	struct rdma_bind_list *bind_list;
1825	struct rdma_id_private *id_priv;
1826	int err;
1827
1828	err = cma_save_req_info(ib_event, req);
1829	if (err)
1830		return ERR_PTR(err);
1831
1832	*net_dev = cma_get_net_dev(ib_event, req);
1833	if (IS_ERR(*net_dev)) {
1834		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
1835			/* Assuming the protocol is AF_IB */
1836			*net_dev = NULL;
1837		} else {
1838			return ERR_CAST(*net_dev);
1839		}
1840	}
1841
1842	mutex_lock(&lock);
1843	/*
 1844	 * The net namespace might be deleted while the route lookup and
 1845	 * cm_id lookup are in progress. Therefore, perform the netdevice
 1846	 * validation and cm_id lookup under the RCU lock.
 1847	 * The RCU lock, together with the netdevice state check, synchronizes
 1848	 * with a netdevice migrating to a different net namespace and also
 1849	 * ensures that the net namespace is not deleted while the lookup is
 1850	 * in progress.
 1851	 * If the device state is not IFF_UP, its properties such as ifindex
 1852	 * and nd_net cannot be trusted to remain valid without the RCU lock.
 1853	 * net/core/dev.c change_net_namespace() synchronizes with ongoing
 1854	 * operations on a net device after the device is closed using
 1855	 * synchronize_net().
1856	 */
1857	rcu_read_lock();
1858	if (*net_dev) {
1859		/*
 1860		 * If the netdevice is down, it is likely administratively down
 1861		 * or it might be migrating to a different namespace.
1862		 * In that case avoid further processing, as the net namespace
1863		 * or ifindex may change.
1864		 */
1865		if (((*net_dev)->flags & IFF_UP) == 0) {
1866			id_priv = ERR_PTR(-EHOSTUNREACH);
1867			goto err;
1868		}
1869
1870		if (!validate_net_dev(*net_dev,
1871				 (struct sockaddr *)&req->src_addr_storage,
1872				 (struct sockaddr *)&req->listen_addr_storage)) {
1873			id_priv = ERR_PTR(-EHOSTUNREACH);
1874			goto err;
1875		}
1876	}
1877
1878	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
1879				rdma_ps_from_service_id(req->service_id),
1880				cma_port_from_service_id(req->service_id));
1881	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
1882err:
1883	rcu_read_unlock();
1884	mutex_unlock(&lock);
1885	if (IS_ERR(id_priv) && *net_dev) {
1886		dev_put(*net_dev);
1887		*net_dev = NULL;
1888	}
1889	return id_priv;
1890}
1891
1892static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
1893{
1894	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1895}
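
/*
 * Worked note (illustrative, not part of this file): for IP-based
 * identifiers the first sizeof(struct cma_hdr) bytes of the IB CM private
 * data carry the CMA header, so consumer private data starts at that
 * offset; AF_IB identifiers carry no such header, hence offset 0.  The
 * offset is applied in cma_ib_req_handler() below when the connect request
 * event is delivered to the listener.
 */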
1896
1897static void cma_cancel_route(struct rdma_id_private *id_priv)
1898{
1899	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1900		if (id_priv->query)
1901			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
1902	}
1903}
1904
1905static void _cma_cancel_listens(struct rdma_id_private *id_priv)
1906{
1907	struct rdma_id_private *dev_id_priv;
1908
1909	lockdep_assert_held(&lock);
1910
1911	/*
1912	 * Remove from listen_any_list to prevent added devices from spawning
1913	 * additional listen requests.
1914	 */
1915	list_del_init(&id_priv->listen_any_item);
1916
1917	while (!list_empty(&id_priv->listen_list)) {
1918		dev_id_priv =
1919			list_first_entry(&id_priv->listen_list,
1920					 struct rdma_id_private, listen_item);
1921		/* sync with device removal to avoid duplicate destruction */
1922		list_del_init(&dev_id_priv->device_item);
1923		list_del_init(&dev_id_priv->listen_item);
1924		mutex_unlock(&lock);
1925
1926		rdma_destroy_id(&dev_id_priv->id);
1927		mutex_lock(&lock);
1928	}
1929}
1930
1931static void cma_cancel_listens(struct rdma_id_private *id_priv)
1932{
1933	mutex_lock(&lock);
1934	_cma_cancel_listens(id_priv);
1935	mutex_unlock(&lock);
1936}
1937
1938static void cma_cancel_operation(struct rdma_id_private *id_priv,
1939				 enum rdma_cm_state state)
1940{
1941	switch (state) {
1942	case RDMA_CM_ADDR_QUERY:
1943		/*
1944		 * We can avoid doing the rdma_addr_cancel() based on state,
1945		 * only RDMA_CM_ADDR_QUERY has a work that could still execute.
1946		 * Notice that the addr_handler work could still be exiting
1947		 * outside this state, however due to the interaction with the
1948		 * handler_mutex the work is guaranteed not to touch id_priv
1949		 * during exit.
1950		 */
1951		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1952		break;
1953	case RDMA_CM_ROUTE_QUERY:
1954		cma_cancel_route(id_priv);
1955		break;
1956	case RDMA_CM_LISTEN:
1957		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1958			cma_cancel_listens(id_priv);
1959		break;
1960	default:
1961		break;
1962	}
1963}
1964
1965static void cma_release_port(struct rdma_id_private *id_priv)
1966{
1967	struct rdma_bind_list *bind_list = id_priv->bind_list;
1968	struct net *net = id_priv->id.route.addr.dev_addr.net;
1969
1970	if (!bind_list)
1971		return;
1972
1973	mutex_lock(&lock);
1974	hlist_del(&id_priv->node);
1975	if (hlist_empty(&bind_list->owners)) {
1976		cma_ps_remove(net, bind_list->ps, bind_list->port);
1977		kfree(bind_list);
1978	}
1979	mutex_unlock(&lock);
1980}
1981
1982static void destroy_mc(struct rdma_id_private *id_priv,
1983		       struct cma_multicast *mc)
1984{
1985	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
1986
1987	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
1988		ib_sa_free_multicast(mc->sa_mc);
1989
1990	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
1991		struct rdma_dev_addr *dev_addr =
1992			&id_priv->id.route.addr.dev_addr;
1993		struct net_device *ndev = NULL;
1994
1995		if (dev_addr->bound_dev_if)
1996			ndev = dev_get_by_index(dev_addr->net,
1997						dev_addr->bound_dev_if);
1998		if (ndev && !send_only) {
1999			enum ib_gid_type gid_type;
2000			union ib_gid mgid;
2001
2002			gid_type = id_priv->cma_dev->default_gid_type
2003					   [id_priv->id.port_num -
2004					    rdma_start_port(
2005						    id_priv->cma_dev->device)];
2006			cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
2007					  gid_type);
2008			cma_igmp_send(ndev, &mgid, false);
2009		}
2010		dev_put(ndev);
2011
2012		cancel_work_sync(&mc->iboe_join.work);
2013	}
2014	kfree(mc);
2015}
2016
2017static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
2018{
2019	struct cma_multicast *mc;
2020
2021	while (!list_empty(&id_priv->mc_list)) {
2022		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
2023				      list);
2024		list_del(&mc->list);
2025		destroy_mc(id_priv, mc);
2026	}
2027}
2028
2029static void _destroy_id(struct rdma_id_private *id_priv,
2030			enum rdma_cm_state state)
2031{
2032	cma_cancel_operation(id_priv, state);
2033
2034	rdma_restrack_del(&id_priv->res);
2035	cma_remove_id_from_tree(id_priv);
2036	if (id_priv->cma_dev) {
2037		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
2038			if (id_priv->cm_id.ib)
2039				ib_destroy_cm_id(id_priv->cm_id.ib);
2040		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
2041			if (id_priv->cm_id.iw)
2042				iw_destroy_cm_id(id_priv->cm_id.iw);
2043		}
2044		cma_leave_mc_groups(id_priv);
2045		cma_release_dev(id_priv);
2046	}
2047
2048	cma_release_port(id_priv);
2049	cma_id_put(id_priv);
2050	wait_for_completion(&id_priv->comp);
2051
2052	if (id_priv->internal_id)
2053		cma_id_put(id_priv->id.context);
2054
2055	kfree(id_priv->id.route.path_rec);
2056	kfree(id_priv->id.route.path_rec_inbound);
2057	kfree(id_priv->id.route.path_rec_outbound);
2058
2059	put_net(id_priv->id.route.addr.dev_addr.net);
2060	kfree(id_priv);
2061}
2062
2063/*
2064 * destroy an ID from within the handler_mutex. This ensures that no other
2065 * handlers can start running concurrently.
2066 */
2067static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
2068	__releases(&id_priv->handler_mutex)
2069{
2070	enum rdma_cm_state state;
2071	unsigned long flags;
2072
2073	trace_cm_id_destroy(id_priv);
2074
2075	/*
2076	 * Setting the state to destroyed under the handler mutex provides a
2077	 * fence against calling handler callbacks. If this is invoked due to
2078	 * the failure of a handler callback then it guarantees that no future
2079	 * handlers will be called.
2080	 */
2081	lockdep_assert_held(&id_priv->handler_mutex);
2082	spin_lock_irqsave(&id_priv->lock, flags);
2083	state = id_priv->state;
2084	id_priv->state = RDMA_CM_DESTROYING;
2085	spin_unlock_irqrestore(&id_priv->lock, flags);
2086	mutex_unlock(&id_priv->handler_mutex);
2087	_destroy_id(id_priv, state);
2088}
2089
2090void rdma_destroy_id(struct rdma_cm_id *id)
2091{
2092	struct rdma_id_private *id_priv =
2093		container_of(id, struct rdma_id_private, id);
2094
2095	mutex_lock(&id_priv->handler_mutex);
2096	destroy_id_handler_unlock(id_priv);
2097}
2098EXPORT_SYMBOL(rdma_destroy_id);
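
/*
 * Hypothetical sketch (not part of this file): a consumer tears an ID down
 * either by calling rdma_destroy_id() directly outside of its event
 * handler, or, from inside the handler, by returning a non-zero value so
 * that the CM destroys the ID on its behalf (see the "Destroy the CM ID by
 * returning a non-zero value" paths below).  The handler here is an
 * illustrative example, not an existing consumer.
 */
static int example_event_handler(struct rdma_cm_id *id,
				 struct rdma_cm_event *event)
{
	if (event->event == RDMA_CM_EVENT_REJECTED ||
	    event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		return -1;	/* request destruction of this id by the CM */
	return 0;		/* keep the id alive */
}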
2099
2100static int cma_rep_recv(struct rdma_id_private *id_priv)
2101{
2102	int ret;
2103
2104	ret = cma_modify_qp_rtr(id_priv, NULL);
2105	if (ret)
2106		goto reject;
2107
2108	ret = cma_modify_qp_rts(id_priv, NULL);
2109	if (ret)
2110		goto reject;
2111
2112	trace_cm_send_rtu(id_priv);
2113	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
2114	if (ret)
2115		goto reject;
2116
2117	return 0;
2118reject:
2119	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
2120	cma_modify_qp_err(id_priv);
2121	trace_cm_send_rej(id_priv);
2122	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
2123		       NULL, 0, NULL, 0);
2124	return ret;
2125}
2126
2127static void cma_set_rep_event_data(struct rdma_cm_event *event,
2128				   const struct ib_cm_rep_event_param *rep_data,
2129				   void *private_data)
2130{
2131	event->param.conn.private_data = private_data;
2132	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
2133	event->param.conn.responder_resources = rep_data->responder_resources;
2134	event->param.conn.initiator_depth = rep_data->initiator_depth;
2135	event->param.conn.flow_control = rep_data->flow_control;
2136	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
2137	event->param.conn.srq = rep_data->srq;
2138	event->param.conn.qp_num = rep_data->remote_qpn;
2139
2140	event->ece.vendor_id = rep_data->ece.vendor_id;
2141	event->ece.attr_mod = rep_data->ece.attr_mod;
2142}
2143
2144static int cma_cm_event_handler(struct rdma_id_private *id_priv,
2145				struct rdma_cm_event *event)
2146{
2147	int ret;
2148
2149	lockdep_assert_held(&id_priv->handler_mutex);
2150
2151	trace_cm_event_handler(id_priv, event);
2152	ret = id_priv->id.event_handler(&id_priv->id, event);
2153	trace_cm_event_done(id_priv, event, ret);
2154	return ret;
2155}
2156
2157static int cma_ib_handler(struct ib_cm_id *cm_id,
2158			  const struct ib_cm_event *ib_event)
2159{
2160	struct rdma_id_private *id_priv = cm_id->context;
2161	struct rdma_cm_event event = {};
2162	enum rdma_cm_state state;
2163	int ret;
2164
2165	mutex_lock(&id_priv->handler_mutex);
2166	state = READ_ONCE(id_priv->state);
2167	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
2168	     state != RDMA_CM_CONNECT) ||
2169	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
2170	     state != RDMA_CM_DISCONNECT))
2171		goto out;
2172
2173	switch (ib_event->event) {
2174	case IB_CM_REQ_ERROR:
2175	case IB_CM_REP_ERROR:
2176		event.event = RDMA_CM_EVENT_UNREACHABLE;
2177		event.status = -ETIMEDOUT;
2178		break;
2179	case IB_CM_REP_RECEIVED:
2180		if (state == RDMA_CM_CONNECT &&
2181		    (id_priv->id.qp_type != IB_QPT_UD)) {
2182			trace_cm_send_mra(id_priv);
2183			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2184		}
2185		if (id_priv->id.qp) {
2186			event.status = cma_rep_recv(id_priv);
2187			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
2188						     RDMA_CM_EVENT_ESTABLISHED;
2189		} else {
2190			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
2191		}
2192		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
2193				       ib_event->private_data);
2194		break;
2195	case IB_CM_RTU_RECEIVED:
2196	case IB_CM_USER_ESTABLISHED:
2197		event.event = RDMA_CM_EVENT_ESTABLISHED;
2198		break;
2199	case IB_CM_DREQ_ERROR:
2200		event.status = -ETIMEDOUT;
2201		fallthrough;
2202	case IB_CM_DREQ_RECEIVED:
2203	case IB_CM_DREP_RECEIVED:
2204		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
2205				   RDMA_CM_DISCONNECT))
2206			goto out;
2207		event.event = RDMA_CM_EVENT_DISCONNECTED;
2208		break;
2209	case IB_CM_TIMEWAIT_EXIT:
2210		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
2211		break;
2212	case IB_CM_MRA_RECEIVED:
2213		/* ignore event */
2214		goto out;
2215	case IB_CM_REJ_RECEIVED:
2216		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
2217										ib_event->param.rej_rcvd.reason));
2218		cma_modify_qp_err(id_priv);
2219		event.status = ib_event->param.rej_rcvd.reason;
2220		event.event = RDMA_CM_EVENT_REJECTED;
2221		event.param.conn.private_data = ib_event->private_data;
2222		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
2223		break;
2224	default:
2225		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
2226		       ib_event->event);
2227		goto out;
2228	}
2229
2230	ret = cma_cm_event_handler(id_priv, &event);
2231	if (ret) {
2232		/* Destroy the CM ID by returning a non-zero value. */
2233		id_priv->cm_id.ib = NULL;
2234		destroy_id_handler_unlock(id_priv);
2235		return ret;
2236	}
2237out:
2238	mutex_unlock(&id_priv->handler_mutex);
2239	return 0;
2240}
2241
2242static struct rdma_id_private *
2243cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
2244		   const struct ib_cm_event *ib_event,
2245		   struct net_device *net_dev)
2246{
2247	struct rdma_id_private *listen_id_priv;
2248	struct rdma_id_private *id_priv;
2249	struct rdma_cm_id *id;
2250	struct rdma_route *rt;
2251	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2252	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
2253	const __be64 service_id =
2254		ib_event->param.req_rcvd.primary_path->service_id;
2255	int ret;
2256
2257	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2258	id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
2259				   listen_id->event_handler, listen_id->context,
2260				   listen_id->ps,
2261				   ib_event->param.req_rcvd.qp_type,
2262				   listen_id_priv);
2263	if (IS_ERR(id_priv))
2264		return NULL;
2265
2266	id = &id_priv->id;
2267	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2268			      (struct sockaddr *)&id->route.addr.dst_addr,
2269			      listen_id, ib_event, ss_family, service_id))
2270		goto err;
2271
2272	rt = &id->route;
2273	rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
2274	rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
2275				     sizeof(*rt->path_rec), GFP_KERNEL);
2276	if (!rt->path_rec)
2277		goto err;
2278
2279	rt->path_rec[0] = *path;
2280	if (rt->num_pri_alt_paths == 2)
2281		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
2282
2283	if (net_dev) {
2284		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
2285	} else {
2286		if (!cma_protocol_roce(listen_id) &&
2287		    cma_any_addr(cma_src_addr(id_priv))) {
2288			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
2289			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
2290			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
2291		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
2292			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
2293			if (ret)
2294				goto err;
2295		}
2296	}
2297	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
2298
2299	id_priv->state = RDMA_CM_CONNECT;
2300	return id_priv;
2301
2302err:
2303	rdma_destroy_id(id);
2304	return NULL;
2305}
2306
2307static struct rdma_id_private *
2308cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
2309		  const struct ib_cm_event *ib_event,
2310		  struct net_device *net_dev)
2311{
2312	const struct rdma_id_private *listen_id_priv;
2313	struct rdma_id_private *id_priv;
2314	struct rdma_cm_id *id;
2315	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2316	struct net *net = listen_id->route.addr.dev_addr.net;
2317	int ret;
2318
2319	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2320	id_priv = __rdma_create_id(net, listen_id->event_handler,
2321				   listen_id->context, listen_id->ps, IB_QPT_UD,
2322				   listen_id_priv);
2323	if (IS_ERR(id_priv))
2324		return NULL;
2325
2326	id = &id_priv->id;
2327	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2328			      (struct sockaddr *)&id->route.addr.dst_addr,
2329			      listen_id, ib_event, ss_family,
2330			      ib_event->param.sidr_req_rcvd.service_id))
2331		goto err;
2332
2333	if (net_dev) {
2334		rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
2335	} else {
2336		if (!cma_any_addr(cma_src_addr(id_priv))) {
2337			ret = cma_translate_addr(cma_src_addr(id_priv),
2338						 &id->route.addr.dev_addr);
2339			if (ret)
2340				goto err;
2341		}
2342	}
2343
2344	id_priv->state = RDMA_CM_CONNECT;
2345	return id_priv;
2346err:
2347	rdma_destroy_id(id);
2348	return NULL;
2349}
2350
2351static void cma_set_req_event_data(struct rdma_cm_event *event,
2352				   const struct ib_cm_req_event_param *req_data,
2353				   void *private_data, int offset)
2354{
2355	event->param.conn.private_data = private_data + offset;
2356	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
2357	event->param.conn.responder_resources = req_data->responder_resources;
2358	event->param.conn.initiator_depth = req_data->initiator_depth;
2359	event->param.conn.flow_control = req_data->flow_control;
2360	event->param.conn.retry_count = req_data->retry_count;
2361	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
2362	event->param.conn.srq = req_data->srq;
2363	event->param.conn.qp_num = req_data->remote_qpn;
2364
2365	event->ece.vendor_id = req_data->ece.vendor_id;
2366	event->ece.attr_mod = req_data->ece.attr_mod;
2367}
2368
2369static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
2370				    const struct ib_cm_event *ib_event)
2371{
2372	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
2373		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
2374		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
2375		 (id->qp_type == IB_QPT_UD)) ||
2376		(!id->qp_type));
2377}
2378
2379static int cma_ib_req_handler(struct ib_cm_id *cm_id,
2380			      const struct ib_cm_event *ib_event)
2381{
2382	struct rdma_id_private *listen_id, *conn_id = NULL;
2383	struct rdma_cm_event event = {};
2384	struct cma_req_info req = {};
2385	struct net_device *net_dev;
2386	u8 offset;
2387	int ret;
2388
2389	listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
2390	if (IS_ERR(listen_id))
2391		return PTR_ERR(listen_id);
2392
2393	trace_cm_req_handler(listen_id, ib_event->event);
2394	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
2395		ret = -EINVAL;
2396		goto net_dev_put;
2397	}
2398
2399	mutex_lock(&listen_id->handler_mutex);
2400	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
2401		ret = -ECONNABORTED;
2402		goto err_unlock;
2403	}
2404
2405	offset = cma_user_data_offset(listen_id);
2406	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2407	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
2408		conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
2409		event.param.ud.private_data = ib_event->private_data + offset;
2410		event.param.ud.private_data_len =
2411				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
2412	} else {
2413		conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
2414		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
2415				       ib_event->private_data, offset);
2416	}
2417	if (!conn_id) {
2418		ret = -ENOMEM;
2419		goto err_unlock;
2420	}
2421
2422	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2423	ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
2424	if (ret) {
2425		destroy_id_handler_unlock(conn_id);
2426		goto err_unlock;
2427	}
2428
2429	conn_id->cm_id.ib = cm_id;
2430	cm_id->context = conn_id;
2431	cm_id->cm_handler = cma_ib_handler;
2432
2433	ret = cma_cm_event_handler(conn_id, &event);
2434	if (ret) {
2435		/* Destroy the CM ID by returning a non-zero value. */
2436		conn_id->cm_id.ib = NULL;
2437		mutex_unlock(&listen_id->handler_mutex);
2438		destroy_id_handler_unlock(conn_id);
2439		goto net_dev_put;
2440	}
2441
2442	if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
2443	    conn_id->id.qp_type != IB_QPT_UD) {
2444		trace_cm_send_mra(cm_id->context);
2445		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2446	}
2447	mutex_unlock(&conn_id->handler_mutex);
2448
2449err_unlock:
2450	mutex_unlock(&listen_id->handler_mutex);
2451
2452net_dev_put:
2453	dev_put(net_dev);
2454
2455	return ret;
2456}
2457
2458__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
2459{
2460	if (addr->sa_family == AF_IB)
2461		return ((struct sockaddr_ib *) addr)->sib_sid;
2462
2463	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
2464}
2465EXPORT_SYMBOL(rdma_get_service_id);
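
/*
 * Worked example (illustrative, not part of this file): for an IPv4/IPv6
 * address the service ID is simply (port space << 16) + port.  Assuming
 * RDMA_PS_TCP (0x0106) and port 5000 (0x1388), the service ID is
 * 0x0106 << 16 | 0x1388 = 0x01061388, returned in network byte order.
 * For AF_IB the caller-supplied sib_sid is returned unchanged.
 */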
2466
2467void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
2468		    union ib_gid *dgid)
2469{
2470	struct rdma_addr *addr = &cm_id->route.addr;
2471
2472	if (!cm_id->device) {
2473		if (sgid)
2474			memset(sgid, 0, sizeof(*sgid));
2475		if (dgid)
2476			memset(dgid, 0, sizeof(*dgid));
2477		return;
2478	}
2479
2480	if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
2481		if (sgid)
2482			rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
2483		if (dgid)
2484			rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
2485	} else {
2486		if (sgid)
2487			rdma_addr_get_sgid(&addr->dev_addr, sgid);
2488		if (dgid)
2489			rdma_addr_get_dgid(&addr->dev_addr, dgid);
2490	}
2491}
2492EXPORT_SYMBOL(rdma_read_gids);
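
/*
 * Hypothetical usage sketch (not part of this file): a ULP that wants to
 * log the GIDs of a connection can read them irrespective of transport;
 * for RoCE the GIDs are derived from the IP addresses, otherwise they are
 * taken from the resolved dev_addr.
 */
static void example_log_gids(struct rdma_cm_id *id)
{
	union ib_gid sgid, dgid;

	rdma_read_gids(id, &sgid, &dgid);
	pr_info("cm_id %p sgid %pI6 dgid %pI6\n", id, sgid.raw, dgid.raw);
}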
2493
2494static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2495{
2496	struct rdma_id_private *id_priv = iw_id->context;
2497	struct rdma_cm_event event = {};
2498	int ret = 0;
2499	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2500	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2501
2502	mutex_lock(&id_priv->handler_mutex);
2503	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
2504		goto out;
2505
2506	switch (iw_event->event) {
2507	case IW_CM_EVENT_CLOSE:
2508		event.event = RDMA_CM_EVENT_DISCONNECTED;
2509		break;
2510	case IW_CM_EVENT_CONNECT_REPLY:
2511		memcpy(cma_src_addr(id_priv), laddr,
2512		       rdma_addr_size(laddr));
2513		memcpy(cma_dst_addr(id_priv), raddr,
2514		       rdma_addr_size(raddr));
2515		switch (iw_event->status) {
2516		case 0:
2517			event.event = RDMA_CM_EVENT_ESTABLISHED;
2518			event.param.conn.initiator_depth = iw_event->ird;
2519			event.param.conn.responder_resources = iw_event->ord;
2520			break;
2521		case -ECONNRESET:
2522		case -ECONNREFUSED:
2523			event.event = RDMA_CM_EVENT_REJECTED;
2524			break;
2525		case -ETIMEDOUT:
2526			event.event = RDMA_CM_EVENT_UNREACHABLE;
2527			break;
2528		default:
2529			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
2530			break;
2531		}
2532		break;
2533	case IW_CM_EVENT_ESTABLISHED:
2534		event.event = RDMA_CM_EVENT_ESTABLISHED;
2535		event.param.conn.initiator_depth = iw_event->ird;
2536		event.param.conn.responder_resources = iw_event->ord;
2537		break;
2538	default:
2539		goto out;
2540	}
2541
2542	event.status = iw_event->status;
2543	event.param.conn.private_data = iw_event->private_data;
2544	event.param.conn.private_data_len = iw_event->private_data_len;
2545	ret = cma_cm_event_handler(id_priv, &event);
2546	if (ret) {
2547		/* Destroy the CM ID by returning a non-zero value. */
2548		id_priv->cm_id.iw = NULL;
2549		destroy_id_handler_unlock(id_priv);
2550		return ret;
2551	}
2552
2553out:
2554	mutex_unlock(&id_priv->handler_mutex);
2555	return ret;
2556}
2557
2558static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2559			       struct iw_cm_event *iw_event)
2560{
2561	struct rdma_id_private *listen_id, *conn_id;
2562	struct rdma_cm_event event = {};
2563	int ret = -ECONNABORTED;
2564	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2565	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2566
2567	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2568	event.param.conn.private_data = iw_event->private_data;
2569	event.param.conn.private_data_len = iw_event->private_data_len;
2570	event.param.conn.initiator_depth = iw_event->ird;
2571	event.param.conn.responder_resources = iw_event->ord;
2572
2573	listen_id = cm_id->context;
2574
2575	mutex_lock(&listen_id->handler_mutex);
2576	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
2577		goto out;
2578
2579	/* Create a new RDMA id for the new IW CM ID */
2580	conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2581				   listen_id->id.event_handler,
2582				   listen_id->id.context, RDMA_PS_TCP,
2583				   IB_QPT_RC, listen_id);
2584	if (IS_ERR(conn_id)) {
2585		ret = -ENOMEM;
2586		goto out;
2587	}
2588	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2589	conn_id->state = RDMA_CM_CONNECT;
2590
2591	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2592	if (ret) {
2593		mutex_unlock(&listen_id->handler_mutex);
2594		destroy_id_handler_unlock(conn_id);
2595		return ret;
2596	}
2597
2598	ret = cma_iw_acquire_dev(conn_id, listen_id);
2599	if (ret) {
2600		mutex_unlock(&listen_id->handler_mutex);
2601		destroy_id_handler_unlock(conn_id);
2602		return ret;
2603	}
2604
2605	conn_id->cm_id.iw = cm_id;
2606	cm_id->context = conn_id;
2607	cm_id->cm_handler = cma_iw_handler;
2608
2609	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
2610	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
2611
2612	ret = cma_cm_event_handler(conn_id, &event);
2613	if (ret) {
2614		/* User wants to destroy the CM ID */
2615		conn_id->cm_id.iw = NULL;
2616		mutex_unlock(&listen_id->handler_mutex);
2617		destroy_id_handler_unlock(conn_id);
2618		return ret;
2619	}
2620
2621	mutex_unlock(&conn_id->handler_mutex);
2622
2623out:
2624	mutex_unlock(&listen_id->handler_mutex);
2625	return ret;
2626}
2627
2628static int cma_ib_listen(struct rdma_id_private *id_priv)
2629{
2630	struct sockaddr *addr;
2631	struct ib_cm_id	*id;
2632	__be64 svc_id;
2633
2634	addr = cma_src_addr(id_priv);
2635	svc_id = rdma_get_service_id(&id_priv->id, addr);
2636	id = ib_cm_insert_listen(id_priv->id.device,
2637				 cma_ib_req_handler, svc_id);
2638	if (IS_ERR(id))
2639		return PTR_ERR(id);
2640	id_priv->cm_id.ib = id;
2641
2642	return 0;
2643}
2644
2645static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
2646{
2647	int ret;
2648	struct iw_cm_id	*id;
2649
2650	id = iw_create_cm_id(id_priv->id.device,
2651			     iw_conn_req_handler,
2652			     id_priv);
2653	if (IS_ERR(id))
2654		return PTR_ERR(id);
2655
2656	mutex_lock(&id_priv->qp_mutex);
2657	id->tos = id_priv->tos;
2658	id->tos_set = id_priv->tos_set;
2659	mutex_unlock(&id_priv->qp_mutex);
2660	id->afonly = id_priv->afonly;
2661	id_priv->cm_id.iw = id;
2662
2663	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
2664	       rdma_addr_size(cma_src_addr(id_priv)));
2665
2666	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
2667
2668	if (ret) {
2669		iw_destroy_cm_id(id_priv->cm_id.iw);
2670		id_priv->cm_id.iw = NULL;
2671	}
2672
2673	return ret;
2674}
2675
2676static int cma_listen_handler(struct rdma_cm_id *id,
2677			      struct rdma_cm_event *event)
2678{
2679	struct rdma_id_private *id_priv = id->context;
2680
2681	/* Listening IDs are always destroyed on removal */
2682	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
2683		return -1;
2684
2685	id->context = id_priv->id.context;
2686	id->event_handler = id_priv->id.event_handler;
2687	trace_cm_event_handler(id_priv, event);
2688	return id_priv->id.event_handler(id, event);
2689}
2690
2691static int cma_listen_on_dev(struct rdma_id_private *id_priv,
2692			     struct cma_device *cma_dev,
2693			     struct rdma_id_private **to_destroy)
2694{
2695	struct rdma_id_private *dev_id_priv;
2696	struct net *net = id_priv->id.route.addr.dev_addr.net;
2697	int ret;
2698
2699	lockdep_assert_held(&lock);
2700
2701	*to_destroy = NULL;
2702	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2703		return 0;
2704
2705	dev_id_priv =
2706		__rdma_create_id(net, cma_listen_handler, id_priv,
2707				 id_priv->id.ps, id_priv->id.qp_type, id_priv);
2708	if (IS_ERR(dev_id_priv))
2709		return PTR_ERR(dev_id_priv);
2710
2711	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2712	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2713	       rdma_addr_size(cma_src_addr(id_priv)));
2714
2715	_cma_attach_to_dev(dev_id_priv, cma_dev);
2716	rdma_restrack_add(&dev_id_priv->res);
2717	cma_id_get(id_priv);
2718	dev_id_priv->internal_id = 1;
2719	dev_id_priv->afonly = id_priv->afonly;
2720	mutex_lock(&id_priv->qp_mutex);
2721	dev_id_priv->tos_set = id_priv->tos_set;
2722	dev_id_priv->tos = id_priv->tos;
2723	mutex_unlock(&id_priv->qp_mutex);
2724
2725	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
2726	if (ret)
2727		goto err_listen;
2728	list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
2729	return 0;
2730err_listen:
2731	/* Caller must destroy this after releasing lock */
2732	*to_destroy = dev_id_priv;
2733	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
2734	return ret;
2735}
2736
2737static int cma_listen_on_all(struct rdma_id_private *id_priv)
2738{
2739	struct rdma_id_private *to_destroy;
2740	struct cma_device *cma_dev;
2741	int ret;
2742
2743	mutex_lock(&lock);
2744	list_add_tail(&id_priv->listen_any_item, &listen_any_list);
2745	list_for_each_entry(cma_dev, &dev_list, list) {
2746		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
2747		if (ret) {
2748			/* Prevent racing with cma_process_remove() */
2749			if (to_destroy)
2750				list_del_init(&to_destroy->device_item);
2751			goto err_listen;
2752		}
2753	}
2754	mutex_unlock(&lock);
2755	return 0;
2756
2757err_listen:
2758	_cma_cancel_listens(id_priv);
2759	mutex_unlock(&lock);
2760	if (to_destroy)
2761		rdma_destroy_id(&to_destroy->id);
2762	return ret;
2763}
2764
2765void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2766{
2767	struct rdma_id_private *id_priv;
2768
2769	id_priv = container_of(id, struct rdma_id_private, id);
2770	mutex_lock(&id_priv->qp_mutex);
2771	id_priv->tos = (u8) tos;
2772	id_priv->tos_set = true;
2773	mutex_unlock(&id_priv->qp_mutex);
2774}
2775EXPORT_SYMBOL(rdma_set_service_type);
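
/*
 * Hypothetical usage sketch (not part of this file): the type of service
 * must be set before the route is resolved, since it feeds the QoS class /
 * traffic class of the path record (see cma_query_ib_route() and
 * cma_resolve_iboe_route() below).  The ToS value 0x18 is an example only.
 */
static void example_set_tos(struct rdma_cm_id *id)
{
	rdma_set_service_type(id, 0x18);
}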
2776
2777/**
2778 * rdma_set_ack_timeout() - Set the ack timeout of QP associated
2779 *                          with a connection identifier.
2780 * @id: Communication identifier associated with the QP.
2781 * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
2782 *
2783 * This function should be called before rdma_connect() on the active side,
2784 * and before rdma_accept() on the passive side. It applies to the primary
2785 * path only. The timeout affects the local side of the QP; it is not
2786 * negotiated with the remote side, and zero disables the timer. If it is
2787 * set before rdma_resolve_route(), the value is also used to determine the
2788 * PacketLifeTime for RoCE.
2789 *
2790 * Return: 0 for success
2791 */
2792int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
2793{
2794	struct rdma_id_private *id_priv;
2795
2796	if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
2797		return -EINVAL;
2798
2799	id_priv = container_of(id, struct rdma_id_private, id);
2800	mutex_lock(&id_priv->qp_mutex);
2801	id_priv->timeout = timeout;
2802	id_priv->timeout_set = true;
2803	mutex_unlock(&id_priv->qp_mutex);
2804
2805	return 0;
2806}
2807EXPORT_SYMBOL(rdma_set_ack_timeout);
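
/*
 * Worked example (illustrative, not part of this file): the timeout
 * argument is the standard IB exponent, i.e. 4.096 usec * 2^timeout.
 * A value of 14 therefore requests roughly 4.096 usec * 16384 ~= 67 msec.
 * A hypothetical active-side caller sets it before rdma_connect():
 */
static int example_set_ack_timeout(struct rdma_cm_id *id)
{
	return rdma_set_ack_timeout(id, 14);	/* ~67 msec, example value */
}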
2808
2809/**
2810 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
2811 *			      QP associated with a connection identifier.
2812 * @id: Communication identifier associated with the QP.
2813 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
2814 *		   Timer Field" in the IBTA specification.
2815 *
2816 * This function should be called before rdma_connect() on the active
2817 * side, and before rdma_accept() on the passive side. The timer value
2818 * will be associated with the local QP. When the local QP receives a
2819 * send it is not ready to handle, typically because the receive queue
2820 * is empty, an RNR Retry NAK is returned to the requester with the
2821 * min_rnr_timer encoded. The requester will then wait at least the
2822 * time specified in the NAK before retrying. The default is zero,
2823 * which translates to a minimum RNR Timer value of 655 ms.
2824 *
2825 * Return: 0 for success
2826 */
2827int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
2828{
2829	struct rdma_id_private *id_priv;
2830
2831	/* It is a five-bit value */
2832	if (min_rnr_timer & 0xe0)
2833		return -EINVAL;
2834
2835	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
2836		return -EINVAL;
2837
2838	id_priv = container_of(id, struct rdma_id_private, id);
2839	mutex_lock(&id_priv->qp_mutex);
2840	id_priv->min_rnr_timer = min_rnr_timer;
2841	id_priv->min_rnr_timer_set = true;
2842	mutex_unlock(&id_priv->qp_mutex);
2843
2844	return 0;
2845}
2846EXPORT_SYMBOL(rdma_set_min_rnr_timer);
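
/*
 * Hypothetical usage sketch (not part of this file): the min_rnr_timer
 * argument is the 5-bit IBTA encoding, not a time in milliseconds, so only
 * values 0..31 are accepted (0 encodes 655.36 msec, the largest value in
 * the table).  The encoding 0x12 below is an example value from Table 45.
 */
static int example_set_min_rnr_timer(struct rdma_cm_id *id)
{
	return rdma_set_min_rnr_timer(id, 0x12);
}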
2847
2848static int route_set_path_rec_inbound(struct cma_work *work,
2849				      struct sa_path_rec *path_rec)
2850{
2851	struct rdma_route *route = &work->id->id.route;
2852
2853	if (!route->path_rec_inbound) {
2854		route->path_rec_inbound =
2855			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
2856		if (!route->path_rec_inbound)
2857			return -ENOMEM;
2858	}
2859
2860	*route->path_rec_inbound = *path_rec;
2861	return 0;
2862}
2863
2864static int route_set_path_rec_outbound(struct cma_work *work,
2865				       struct sa_path_rec *path_rec)
2866{
2867	struct rdma_route *route = &work->id->id.route;
2868
2869	if (!route->path_rec_outbound) {
2870		route->path_rec_outbound =
2871			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
2872		if (!route->path_rec_outbound)
2873			return -ENOMEM;
2874	}
2875
2876	*route->path_rec_outbound = *path_rec;
2877	return 0;
2878}
2879
2880static void cma_query_handler(int status, struct sa_path_rec *path_rec,
2881			      unsigned int num_prs, void *context)
2882{
2883	struct cma_work *work = context;
2884	struct rdma_route *route;
2885	int i;
2886
2887	route = &work->id->id.route;
2888
2889	if (status)
2890		goto fail;
2891
2892	for (i = 0; i < num_prs; i++) {
2893		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
2894			*route->path_rec = path_rec[i];
2895		else if (path_rec[i].flags & IB_PATH_INBOUND)
2896			status = route_set_path_rec_inbound(work, &path_rec[i]);
2897		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
2898			status = route_set_path_rec_outbound(work,
2899							     &path_rec[i]);
2900		else
2901			status = -EINVAL;
2902
2903		if (status)
2904			goto fail;
2905	}
2906
2907	route->num_pri_alt_paths = 1;
2908	queue_work(cma_wq, &work->work);
2909	return;
2910
2911fail:
2912	work->old_state = RDMA_CM_ROUTE_QUERY;
2913	work->new_state = RDMA_CM_ADDR_RESOLVED;
2914	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2915	work->event.status = status;
2916	pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
2917			     status);
2918	queue_work(cma_wq, &work->work);
2919}
2920
2921static int cma_query_ib_route(struct rdma_id_private *id_priv,
2922			      unsigned long timeout_ms, struct cma_work *work)
2923{
2924	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2925	struct sa_path_rec path_rec;
2926	ib_sa_comp_mask comp_mask;
2927	struct sockaddr_in6 *sin6;
2928	struct sockaddr_ib *sib;
2929
2930	memset(&path_rec, 0, sizeof path_rec);
2931
2932	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
2933		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
2934	else
2935		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
2936	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2937	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2938	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2939	path_rec.numb_path = 1;
2940	path_rec.reversible = 1;
2941	path_rec.service_id = rdma_get_service_id(&id_priv->id,
2942						  cma_dst_addr(id_priv));
2943
2944	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2945		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2946		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2947
2948	switch (cma_family(id_priv)) {
2949	case AF_INET:
2950		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2951		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2952		break;
2953	case AF_INET6:
2954		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2955		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2956		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2957		break;
2958	case AF_IB:
2959		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2960		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2961		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2962		break;
2963	}
2964
2965	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2966					       id_priv->id.port_num, &path_rec,
2967					       comp_mask, timeout_ms,
2968					       GFP_KERNEL, cma_query_handler,
2969					       work, &id_priv->query);
2970
2971	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2972}
2973
2974static void cma_iboe_join_work_handler(struct work_struct *work)
2975{
2976	struct cma_multicast *mc =
2977		container_of(work, struct cma_multicast, iboe_join.work);
2978	struct rdma_cm_event *event = &mc->iboe_join.event;
2979	struct rdma_id_private *id_priv = mc->id_priv;
2980	int ret;
2981
2982	mutex_lock(&id_priv->handler_mutex);
2983	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
2984	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
2985		goto out_unlock;
2986
2987	ret = cma_cm_event_handler(id_priv, event);
2988	WARN_ON(ret);
2989
2990out_unlock:
2991	mutex_unlock(&id_priv->handler_mutex);
2992	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
2993		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
2994}
2995
2996static void cma_work_handler(struct work_struct *_work)
2997{
2998	struct cma_work *work = container_of(_work, struct cma_work, work);
2999	struct rdma_id_private *id_priv = work->id;
3000
3001	mutex_lock(&id_priv->handler_mutex);
3002	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
3003	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
3004		goto out_unlock;
3005	if (work->old_state != 0 || work->new_state != 0) {
3006		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
3007			goto out_unlock;
3008	}
3009
3010	if (cma_cm_event_handler(id_priv, &work->event)) {
3011		cma_id_put(id_priv);
3012		destroy_id_handler_unlock(id_priv);
3013		goto out_free;
3014	}
3015
3016out_unlock:
3017	mutex_unlock(&id_priv->handler_mutex);
3018	cma_id_put(id_priv);
3019out_free:
3020	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
3021		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
3022	kfree(work);
3023}
3024
3025static void cma_init_resolve_route_work(struct cma_work *work,
3026					struct rdma_id_private *id_priv)
3027{
3028	work->id = id_priv;
3029	INIT_WORK(&work->work, cma_work_handler);
3030	work->old_state = RDMA_CM_ROUTE_QUERY;
3031	work->new_state = RDMA_CM_ROUTE_RESOLVED;
3032	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
3033}
3034
3035static void enqueue_resolve_addr_work(struct cma_work *work,
3036				      struct rdma_id_private *id_priv)
3037{
3038	/* Balances with cma_id_put() in cma_work_handler */
3039	cma_id_get(id_priv);
3040
3041	work->id = id_priv;
3042	INIT_WORK(&work->work, cma_work_handler);
3043	work->old_state = RDMA_CM_ADDR_QUERY;
3044	work->new_state = RDMA_CM_ADDR_RESOLVED;
3045	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3046
3047	queue_work(cma_wq, &work->work);
3048}
3049
3050static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
3051				unsigned long timeout_ms)
3052{
3053	struct rdma_route *route = &id_priv->id.route;
3054	struct cma_work *work;
3055	int ret;
3056
3057	work = kzalloc(sizeof *work, GFP_KERNEL);
3058	if (!work)
3059		return -ENOMEM;
3060
3061	cma_init_resolve_route_work(work, id_priv);
3062
3063	if (!route->path_rec)
3064		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
3065	if (!route->path_rec) {
3066		ret = -ENOMEM;
3067		goto err1;
3068	}
3069
3070	ret = cma_query_ib_route(id_priv, timeout_ms, work);
3071	if (ret)
3072		goto err2;
3073
3074	return 0;
3075err2:
3076	kfree(route->path_rec);
3077	route->path_rec = NULL;
3078err1:
3079	kfree(work);
3080	return ret;
3081}
3082
3083static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
3084					   unsigned long supported_gids,
3085					   enum ib_gid_type default_gid)
3086{
3087	if ((network_type == RDMA_NETWORK_IPV4 ||
3088	     network_type == RDMA_NETWORK_IPV6) &&
3089	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
3090		return IB_GID_TYPE_ROCE_UDP_ENCAP;
3091
3092	return default_gid;
3093}
3094
3095/*
3096 * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
3097 * path record type based on the GID type.
3098 * It also sets up the other L2 fields of the path record, including the
3099 * destination MAC address and the netdev ifindex.
3100 * It returns the netdev of the bound interface for this path record entry.
3101 */
3102static struct net_device *
3103cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
3104{
3105	struct rdma_route *route = &id_priv->id.route;
3106	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
3107	struct rdma_addr *addr = &route->addr;
3108	unsigned long supported_gids;
3109	struct net_device *ndev;
3110
3111	if (!addr->dev_addr.bound_dev_if)
3112		return NULL;
3113
3114	ndev = dev_get_by_index(addr->dev_addr.net,
3115				addr->dev_addr.bound_dev_if);
3116	if (!ndev)
3117		return NULL;
3118
3119	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
3120						    id_priv->id.port_num);
3121	gid_type = cma_route_gid_type(addr->dev_addr.network,
3122				      supported_gids,
3123				      id_priv->gid_type);
3124	/* Use the hint from IP Stack to select GID Type */
3125	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
3126		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
3127	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
3128
3129	route->path_rec->roce.route_resolved = true;
3130	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
3131	return ndev;
3132}
3133
3134int rdma_set_ib_path(struct rdma_cm_id *id,
3135		     struct sa_path_rec *path_rec)
3136{
3137	struct rdma_id_private *id_priv;
3138	struct net_device *ndev;
3139	int ret;
3140
3141	id_priv = container_of(id, struct rdma_id_private, id);
3142	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3143			   RDMA_CM_ROUTE_RESOLVED))
3144		return -EINVAL;
3145
3146	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
3147				     GFP_KERNEL);
3148	if (!id->route.path_rec) {
3149		ret = -ENOMEM;
3150		goto err;
3151	}
3152
3153	if (rdma_protocol_roce(id->device, id->port_num)) {
3154		ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3155		if (!ndev) {
3156			ret = -ENODEV;
3157			goto err_free;
3158		}
3159		dev_put(ndev);
3160	}
3161
3162	id->route.num_pri_alt_paths = 1;
3163	return 0;
3164
3165err_free:
3166	kfree(id->route.path_rec);
3167	id->route.path_rec = NULL;
3168err:
3169	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
3170	return ret;
3171}
3172EXPORT_SYMBOL(rdma_set_ib_path);
3173
3174static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
3175{
3176	struct cma_work *work;
3177
3178	work = kzalloc(sizeof *work, GFP_KERNEL);
3179	if (!work)
3180		return -ENOMEM;
3181
3182	cma_init_resolve_route_work(work, id_priv);
3183	queue_work(cma_wq, &work->work);
3184	return 0;
3185}
3186
3187static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
3188{
3189	struct net_device *dev;
3190
3191	dev = vlan_dev_real_dev(vlan_ndev);
3192	if (dev->num_tc)
3193		return netdev_get_prio_tc_map(dev, prio);
3194
3195	return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
3196		VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3197}
3198
3199struct iboe_prio_tc_map {
3200	int input_prio;
3201	int output_tc;
3202	bool found;
3203};
3204
3205static int get_lower_vlan_dev_tc(struct net_device *dev,
3206				 struct netdev_nested_priv *priv)
3207{
3208	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
3209
3210	if (is_vlan_dev(dev))
3211		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
3212	else if (dev->num_tc)
3213		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
3214	else
3215		map->output_tc = 0;
3216	/* We are interested only in the first-level VLAN device, so always
3217	 * return 1 to stop iterating over the next-level devices.
3218	 */
3219	map->found = true;
3220	return 1;
3221}
3222
3223static int iboe_tos_to_sl(struct net_device *ndev, int tos)
3224{
3225	struct iboe_prio_tc_map prio_tc_map = {};
3226	int prio = rt_tos2priority(tos);
3227	struct netdev_nested_priv priv;
3228
3229	/* If VLAN device, get it directly from the VLAN netdev */
3230	if (is_vlan_dev(ndev))
3231		return get_vlan_ndev_tc(ndev, prio);
3232
3233	prio_tc_map.input_prio = prio;
3234	priv.data = (void *)&prio_tc_map;
3235	rcu_read_lock();
3236	netdev_walk_all_lower_dev_rcu(ndev,
3237				      get_lower_vlan_dev_tc,
3238				      &priv);
3239	rcu_read_unlock();
3240	/* If a map is found from a lower device, use it; otherwise
3241	 * continue with the current netdevice to get the priority-to-tc map.
3242	 */
3243	if (prio_tc_map.found)
3244		return prio_tc_map.output_tc;
3245	else if (ndev->num_tc)
3246		return netdev_get_prio_tc_map(ndev, prio);
3247	else
3248		return 0;
3249}
3250
3251static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
3252{
3253	struct sockaddr_in6 *addr6;
3254	u16 dport, sport;
3255	u32 hash, fl;
3256
3257	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
3258	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
3259	if ((cma_family(id_priv) != AF_INET6) || !fl) {
3260		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
3261		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
3262		hash = (u32)sport * 31 + dport;
3263		fl = hash & IB_GRH_FLOWLABEL_MASK;
3264	}
3265
3266	return cpu_to_be32(fl);
3267}
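
/*
 * Worked example (illustrative, not part of this file): when the source
 * address is not IPv6 or carries no flow label, the label is derived from
 * the ports.  Assuming sport = 54321 and dport = 4791, the hash is
 * 54321 * 31 + 4791 = 1688742 (0x19c4a6); masked with the 20-bit
 * IB_GRH_FLOWLABEL_MASK this gives a flow label of 0x9c4a6.
 */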
3268
3269static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
3270{
3271	struct rdma_route *route = &id_priv->id.route;
3272	struct rdma_addr *addr = &route->addr;
3273	struct cma_work *work;
3274	int ret;
3275	struct net_device *ndev;
3276
3277	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
3278					rdma_start_port(id_priv->cma_dev->device)];
3279	u8 tos;
3280
3281	mutex_lock(&id_priv->qp_mutex);
3282	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
3283	mutex_unlock(&id_priv->qp_mutex);
3284
3285	work = kzalloc(sizeof *work, GFP_KERNEL);
3286	if (!work)
3287		return -ENOMEM;
3288
3289	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
3290	if (!route->path_rec) {
3291		ret = -ENOMEM;
3292		goto err1;
3293	}
3294
3295	route->num_pri_alt_paths = 1;
3296
3297	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3298	if (!ndev) {
3299		ret = -ENODEV;
3300		goto err2;
3301	}
3302
3303	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3304		    &route->path_rec->sgid);
3305	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
3306		    &route->path_rec->dgid);
3307
3308	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
3309		/* TODO: get the hoplimit from the inet/inet6 device */
3310		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
3311	else
3312		route->path_rec->hop_limit = 1;
3313	route->path_rec->reversible = 1;
3314	route->path_rec->pkey = cpu_to_be16(0xffff);
3315	route->path_rec->mtu_selector = IB_SA_EQ;
3316	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
3317	route->path_rec->traffic_class = tos;
3318	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
3319	route->path_rec->rate_selector = IB_SA_EQ;
3320	route->path_rec->rate = IB_RATE_PORT_CURRENT;
3321	dev_put(ndev);
3322	route->path_rec->packet_life_time_selector = IB_SA_EQ;
3323	/* In case ACK timeout is set, use this value to calculate
3324	 * PacketLifeTime.  As per IBTA 12.7.34,
3325	 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
3326	 * Assuming a negligible local ACK delay, we can use
3327	 * PacketLifeTime = local ACK timeout/2
3328	 * as a reasonable approximation for RoCE networks.
3329	 */
3330	mutex_lock(&id_priv->qp_mutex);
3331	if (id_priv->timeout_set && id_priv->timeout)
3332		route->path_rec->packet_life_time = id_priv->timeout - 1;
3333	else
3334		route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
3335	mutex_unlock(&id_priv->qp_mutex);
3336
3337	if (!route->path_rec->mtu) {
3338		ret = -EINVAL;
3339		goto err2;
3340	}
3341
3342	if (rdma_protocol_roce_udp_encap(id_priv->id.device,
3343					 id_priv->id.port_num))
3344		route->path_rec->flow_label =
3345			cma_get_roce_udp_flow_label(id_priv);
3346
3347	cma_init_resolve_route_work(work, id_priv);
3348	queue_work(cma_wq, &work->work);
3349
3350	return 0;
3351
3352err2:
3353	kfree(route->path_rec);
3354	route->path_rec = NULL;
3355	route->num_pri_alt_paths = 0;
3356err1:
3357	kfree(work);
3358	return ret;
3359}
3360
3361int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
3362{
3363	struct rdma_id_private *id_priv;
3364	int ret;
3365
3366	if (!timeout_ms)
3367		return -EINVAL;
3368
3369	id_priv = container_of(id, struct rdma_id_private, id);
3370	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
3371		return -EINVAL;
3372
3373	cma_id_get(id_priv);
3374	if (rdma_cap_ib_sa(id->device, id->port_num))
3375		ret = cma_resolve_ib_route(id_priv, timeout_ms);
3376	else if (rdma_protocol_roce(id->device, id->port_num)) {
3377		ret = cma_resolve_iboe_route(id_priv);
3378		if (!ret)
3379			cma_add_id_to_tree(id_priv);
3380	}
3381	else if (rdma_protocol_iwarp(id->device, id->port_num))
3382		ret = cma_resolve_iw_route(id_priv);
3383	else
3384		ret = -ENOSYS;
3385
3386	if (ret)
3387		goto err;
3388
3389	return 0;
3390err:
3391	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
3392	cma_id_put(id_priv);
3393	return ret;
3394}
3395EXPORT_SYMBOL(rdma_resolve_route);
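
/*
 * Hypothetical active-side sketch (not part of this file): route resolution
 * is the step between address resolution and connecting.  A typical caller
 * kicks it off from the ADDR_RESOLVED event and waits for ROUTE_RESOLVED
 * (or ROUTE_ERROR) before calling rdma_connect().  The timeout below is an
 * example value.
 */
static int example_on_addr_resolved(struct rdma_cm_id *id)
{
	/*
	 * The path records are filled in asynchronously; completion is
	 * reported through RDMA_CM_EVENT_ROUTE_RESOLVED or ROUTE_ERROR.
	 */
	return rdma_resolve_route(id, 2000);
}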
3396
3397static void cma_set_loopback(struct sockaddr *addr)
3398{
3399	switch (addr->sa_family) {
3400	case AF_INET:
3401		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
3402		break;
3403	case AF_INET6:
3404		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
3405			      0, 0, 0, htonl(1));
3406		break;
3407	default:
3408		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
3409			    0, 0, 0, htonl(1));
3410		break;
3411	}
3412}
3413
3414static int cma_bind_loopback(struct rdma_id_private *id_priv)
3415{
3416	struct cma_device *cma_dev, *cur_dev;
3417	union ib_gid gid;
3418	enum ib_port_state port_state;
3419	unsigned int p;
3420	u16 pkey;
3421	int ret;
3422
3423	cma_dev = NULL;
3424	mutex_lock(&lock);
3425	list_for_each_entry(cur_dev, &dev_list, list) {
3426		if (cma_family(id_priv) == AF_IB &&
3427		    !rdma_cap_ib_cm(cur_dev->device, 1))
3428			continue;
3429
3430		if (!cma_dev)
3431			cma_dev = cur_dev;
3432
3433		rdma_for_each_port (cur_dev->device, p) {
3434			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
3435			    port_state == IB_PORT_ACTIVE) {
3436				cma_dev = cur_dev;
3437				goto port_found;
3438			}
3439		}
3440	}
3441
3442	if (!cma_dev) {
3443		ret = -ENODEV;
3444		goto out;
3445	}
3446
3447	p = 1;
3448
3449port_found:
3450	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
3451	if (ret)
3452		goto out;
3453
3454	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
3455	if (ret)
3456		goto out;
3457
3458	id_priv->id.route.addr.dev_addr.dev_type =
3459		(rdma_protocol_ib(cma_dev->device, p)) ?
3460		ARPHRD_INFINIBAND : ARPHRD_ETHER;
3461
3462	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3463	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
3464	id_priv->id.port_num = p;
3465	cma_attach_to_dev(id_priv, cma_dev);
3466	rdma_restrack_add(&id_priv->res);
3467	cma_set_loopback(cma_src_addr(id_priv));
3468out:
3469	mutex_unlock(&lock);
3470	return ret;
3471}
3472
3473static void addr_handler(int status, struct sockaddr *src_addr,
3474			 struct rdma_dev_addr *dev_addr, void *context)
3475{
3476	struct rdma_id_private *id_priv = context;
3477	struct rdma_cm_event event = {};
3478	struct sockaddr *addr;
3479	struct sockaddr_storage old_addr;
3480
3481	mutex_lock(&id_priv->handler_mutex);
3482	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
3483			   RDMA_CM_ADDR_RESOLVED))
3484		goto out;
3485
3486	/*
3487	 * Store the previous src address, so that if we fail to acquire
3488	 * a matching rdma device, the old address can be restored, which helps
3489	 * to cancel the cma listen operation correctly.
3490	 */
3491	addr = cma_src_addr(id_priv);
3492	memcpy(&old_addr, addr, rdma_addr_size(addr));
3493	memcpy(addr, src_addr, rdma_addr_size(src_addr));
3494	if (!status && !id_priv->cma_dev) {
3495		status = cma_acquire_dev_by_src_ip(id_priv);
3496		if (status)
3497			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
3498					     status);
3499		rdma_restrack_add(&id_priv->res);
3500	} else if (status) {
3501		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
3502	}
3503
3504	if (status) {
3505		memcpy(addr, &old_addr,
3506		       rdma_addr_size((struct sockaddr *)&old_addr));
3507		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3508				   RDMA_CM_ADDR_BOUND))
3509			goto out;
3510		event.event = RDMA_CM_EVENT_ADDR_ERROR;
3511		event.status = status;
3512	} else
3513		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3514
3515	if (cma_cm_event_handler(id_priv, &event)) {
3516		destroy_id_handler_unlock(id_priv);
3517		return;
3518	}
3519out:
3520	mutex_unlock(&id_priv->handler_mutex);
3521}
3522
3523static int cma_resolve_loopback(struct rdma_id_private *id_priv)
3524{
3525	struct cma_work *work;
3526	union ib_gid gid;
3527	int ret;
3528
3529	work = kzalloc(sizeof *work, GFP_KERNEL);
3530	if (!work)
3531		return -ENOMEM;
3532
3533	if (!id_priv->cma_dev) {
3534		ret = cma_bind_loopback(id_priv);
3535		if (ret)
3536			goto err;
3537	}
3538
3539	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3540	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
3541
3542	enqueue_resolve_addr_work(work, id_priv);
3543	return 0;
3544err:
3545	kfree(work);
3546	return ret;
3547}
3548
3549static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
3550{
3551	struct cma_work *work;
3552	int ret;
3553
3554	work = kzalloc(sizeof *work, GFP_KERNEL);
3555	if (!work)
3556		return -ENOMEM;
3557
3558	if (!id_priv->cma_dev) {
3559		ret = cma_resolve_ib_dev(id_priv);
3560		if (ret)
3561			goto err;
3562	}
3563
3564	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
3565		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
3566
3567	enqueue_resolve_addr_work(work, id_priv);
3568	return 0;
3569err:
3570	kfree(work);
3571	return ret;
3572}
3573
3574int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3575{
3576	struct rdma_id_private *id_priv;
3577	unsigned long flags;
3578	int ret;
3579
3580	id_priv = container_of(id, struct rdma_id_private, id);
3581	spin_lock_irqsave(&id_priv->lock, flags);
3582	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
3583	    id_priv->state == RDMA_CM_IDLE) {
3584		id_priv->reuseaddr = reuse;
3585		ret = 0;
3586	} else {
3587		ret = -EINVAL;
3588	}
3589	spin_unlock_irqrestore(&id_priv->lock, flags);
3590	return ret;
3591}
3592EXPORT_SYMBOL(rdma_set_reuseaddr);
3593
3594int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
3595{
3596	struct rdma_id_private *id_priv;
3597	unsigned long flags;
3598	int ret;
3599
3600	id_priv = container_of(id, struct rdma_id_private, id);
3601	spin_lock_irqsave(&id_priv->lock, flags);
3602	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
3603		id_priv->options |= (1 << CMA_OPTION_AFONLY);
3604		id_priv->afonly = afonly;
3605		ret = 0;
3606	} else {
3607		ret = -EINVAL;
3608	}
3609	spin_unlock_irqrestore(&id_priv->lock, flags);
3610	return ret;
3611}
3612EXPORT_SYMBOL(rdma_set_afonly);
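
/*
 * Hypothetical passive-side sketch (not part of this file): both knobs
 * above have to be applied while the id is still IDLE (reuseaddr also
 * works on a listening id, afonly also on a bound id), i.e. typically
 * before rdma_bind_addr() and rdma_listen().  The backlog is an example
 * value.
 */
static int example_listen(struct rdma_cm_id *id, struct sockaddr *addr)
{
	int ret;

	ret = rdma_set_reuseaddr(id, 1);	/* allow sharing the port */
	if (ret)
		return ret;
	ret = rdma_set_afonly(id, 1);		/* restrict to addr's family */
	if (ret)
		return ret;
	ret = rdma_bind_addr(id, addr);
	if (ret)
		return ret;
	return rdma_listen(id, 16);
}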
3613
3614static void cma_bind_port(struct rdma_bind_list *bind_list,
3615			  struct rdma_id_private *id_priv)
3616{
3617	struct sockaddr *addr;
3618	struct sockaddr_ib *sib;
3619	u64 sid, mask;
3620	__be16 port;
3621
3622	lockdep_assert_held(&lock);
3623
3624	addr = cma_src_addr(id_priv);
3625	port = htons(bind_list->port);
3626
3627	switch (addr->sa_family) {
3628	case AF_INET:
3629		((struct sockaddr_in *) addr)->sin_port = port;
3630		break;
3631	case AF_INET6:
3632		((struct sockaddr_in6 *) addr)->sin6_port = port;
3633		break;
3634	case AF_IB:
3635		sib = (struct sockaddr_ib *) addr;
3636		sid = be64_to_cpu(sib->sib_sid);
3637		mask = be64_to_cpu(sib->sib_sid_mask);
3638		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
3639		sib->sib_sid_mask = cpu_to_be64(~0ULL);
3640		break;
3641	}
3642	id_priv->bind_list = bind_list;
3643	hlist_add_head(&id_priv->node, &bind_list->owners);
3644}
3645
3646static int cma_alloc_port(enum rdma_ucm_port_space ps,
3647			  struct rdma_id_private *id_priv, unsigned short snum)
3648{
3649	struct rdma_bind_list *bind_list;
3650	int ret;
3651
3652	lockdep_assert_held(&lock);
3653
3654	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
3655	if (!bind_list)
3656		return -ENOMEM;
3657
3658	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3659			   snum);
3660	if (ret < 0)
3661		goto err;
3662
3663	bind_list->ps = ps;
3664	bind_list->port = snum;
3665	cma_bind_port(bind_list, id_priv);
3666	return 0;
3667err:
3668	kfree(bind_list);
3669	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
3670}
3671
3672static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3673			      struct rdma_id_private *id_priv)
3674{
3675	struct rdma_id_private *cur_id;
3676	struct sockaddr  *daddr = cma_dst_addr(id_priv);
3677	struct sockaddr  *saddr = cma_src_addr(id_priv);
3678	__be16 dport = cma_port(daddr);
3679
3680	lockdep_assert_held(&lock);
3681
3682	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3683		struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
3684		struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
3685		__be16 cur_dport = cma_port(cur_daddr);
3686
3687		if (id_priv == cur_id)
3688			continue;
3689
3690		/* different dest port -> unique */
3691		if (!cma_any_port(daddr) &&
3692		    !cma_any_port(cur_daddr) &&
3693		    (dport != cur_dport))
3694			continue;
3695
3696		/* different src address -> unique */
3697		if (!cma_any_addr(saddr) &&
3698		    !cma_any_addr(cur_saddr) &&
3699		    cma_addr_cmp(saddr, cur_saddr))
3700			continue;
3701
3702		/* different dst address -> unique */
3703		if (!cma_any_addr(daddr) &&
3704		    !cma_any_addr(cur_daddr) &&
3705		    cma_addr_cmp(daddr, cur_daddr))
3706			continue;
3707
3708		return -EADDRNOTAVAIL;
3709	}
3710	return 0;
3711}
3712
3713static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
3714			      struct rdma_id_private *id_priv)
3715{
3716	static unsigned int last_used_port;
3717	int low, high, remaining;
3718	unsigned int rover;
3719	struct net *net = id_priv->id.route.addr.dev_addr.net;
3720
3721	lockdep_assert_held(&lock);
3722
3723	inet_get_local_port_range(net, &low, &high);
3724	remaining = (high - low) + 1;
3725	rover = get_random_u32_inclusive(low, remaining + low - 1);
3726retry:
3727	if (last_used_port != rover) {
3728		struct rdma_bind_list *bind_list;
3729		int ret;
3730
3731		bind_list = cma_ps_find(net, ps, (unsigned short)rover);
3732
3733		if (!bind_list) {
3734			ret = cma_alloc_port(ps, id_priv, rover);
3735		} else {
3736			ret = cma_port_is_unique(bind_list, id_priv);
3737			if (!ret)
3738				cma_bind_port(bind_list, id_priv);
3739		}
3740		/*
3741		 * Remember the previously used port number in order to avoid
3742		 * re-using the same port immediately after it is closed.
3743		 */
3744		if (!ret)
3745			last_used_port = rover;
3746		if (ret != -EADDRNOTAVAIL)
3747			return ret;
3748	}
3749	if (--remaining) {
3750		rover++;
3751		if ((rover < low) || (rover > high))
3752			rover = low;
3753		goto retry;
3754	}
3755	return -EADDRNOTAVAIL;
3756}
3757
3758/*
3759 * Check that the requested port is available.  This is called when trying to
3760 * bind to a specific port, or when trying to listen on a bound port.  In
3761 * the latter case, the provided id_priv may already be on the bind_list, but
3762 * we still need to check that it's okay to start listening.
3763 */
3764static int cma_check_port(struct rdma_bind_list *bind_list,
3765			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
3766{
3767	struct rdma_id_private *cur_id;
3768	struct sockaddr *addr, *cur_addr;
3769
3770	lockdep_assert_held(&lock);
3771
3772	addr = cma_src_addr(id_priv);
3773	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3774		if (id_priv == cur_id)
3775			continue;
3776
3777		if (reuseaddr && cur_id->reuseaddr)
3778			continue;
3779
3780		cur_addr = cma_src_addr(cur_id);
3781		if (id_priv->afonly && cur_id->afonly &&
3782		    (addr->sa_family != cur_addr->sa_family))
3783			continue;
3784
3785		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
3786			return -EADDRNOTAVAIL;
3787
3788		if (!cma_addr_cmp(addr, cur_addr))
3789			return -EADDRINUSE;
3790	}
3791	return 0;
3792}
3793
3794static int cma_use_port(enum rdma_ucm_port_space ps,
3795			struct rdma_id_private *id_priv)
3796{
3797	struct rdma_bind_list *bind_list;
3798	unsigned short snum;
3799	int ret;
3800
3801	lockdep_assert_held(&lock);
3802
3803	snum = ntohs(cma_port(cma_src_addr(id_priv)));
3804	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
3805		return -EACCES;
3806
3807	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3808	if (!bind_list) {
3809		ret = cma_alloc_port(ps, id_priv, snum);
3810	} else {
3811		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
3812		if (!ret)
3813			cma_bind_port(bind_list, id_priv);
3814	}
3815	return ret;
3816}
3817
3818static enum rdma_ucm_port_space
3819cma_select_inet_ps(struct rdma_id_private *id_priv)
3820{
3821	switch (id_priv->id.ps) {
3822	case RDMA_PS_TCP:
3823	case RDMA_PS_UDP:
3824	case RDMA_PS_IPOIB:
3825	case RDMA_PS_IB:
3826		return id_priv->id.ps;
3827	default:
3828
3829		return 0;
3830	}
3831}
3832
3833static enum rdma_ucm_port_space
3834cma_select_ib_ps(struct rdma_id_private *id_priv)
3835{
3836	enum rdma_ucm_port_space ps = 0;
3837	struct sockaddr_ib *sib;
3838	u64 sid_ps, mask, sid;
3839
3840	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
3841	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
3842	sid = be64_to_cpu(sib->sib_sid) & mask;
3843
3844	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
3845		sid_ps = RDMA_IB_IP_PS_IB;
3846		ps = RDMA_PS_IB;
3847	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
3848		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
3849		sid_ps = RDMA_IB_IP_PS_TCP;
3850		ps = RDMA_PS_TCP;
3851	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
3852		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
3853		sid_ps = RDMA_IB_IP_PS_UDP;
3854		ps = RDMA_PS_UDP;
3855	}
3856
3857	if (ps) {
3858		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
3859		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
3860						be64_to_cpu(sib->sib_sid_mask));
3861	}
3862	return ps;
3863}
3864
3865static int cma_get_port(struct rdma_id_private *id_priv)
3866{
3867	enum rdma_ucm_port_space ps;
3868	int ret;
3869
3870	if (cma_family(id_priv) != AF_IB)
3871		ps = cma_select_inet_ps(id_priv);
3872	else
3873		ps = cma_select_ib_ps(id_priv);
3874	if (!ps)
3875		return -EPROTONOSUPPORT;
3876
3877	mutex_lock(&lock);
3878	if (cma_any_port(cma_src_addr(id_priv)))
3879		ret = cma_alloc_any_port(ps, id_priv);
3880	else
3881		ret = cma_use_port(ps, id_priv);
3882	mutex_unlock(&lock);
3883
3884	return ret;
3885}
3886
3887static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
3888			       struct sockaddr *addr)
3889{
3890#if IS_ENABLED(CONFIG_IPV6)
3891	struct sockaddr_in6 *sin6;
3892
3893	if (addr->sa_family != AF_INET6)
3894		return 0;
3895
3896	sin6 = (struct sockaddr_in6 *) addr;
3897
3898	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
3899		return 0;
3900
3901	if (!sin6->sin6_scope_id)
3902		return -EINVAL;
3903
3904	dev_addr->bound_dev_if = sin6->sin6_scope_id;
3905#endif
3906	return 0;
3907}
3908
3909int rdma_listen(struct rdma_cm_id *id, int backlog)
3910{
3911	struct rdma_id_private *id_priv =
3912		container_of(id, struct rdma_id_private, id);
3913	int ret;
3914
3915	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
3916		struct sockaddr_in any_in = {
3917			.sin_family = AF_INET,
3918			.sin_addr.s_addr = htonl(INADDR_ANY),
3919		};
3920
3921		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
3922		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
3923		if (ret)
3924			return ret;
3925		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
3926					   RDMA_CM_LISTEN)))
3927			return -EINVAL;
3928	}
3929
3930	/*
3931	 * Once the ID reaches RDMA_CM_LISTEN it may no longer be marked
3932	 * reusable and must be unique in the bind list.
3933	 */
3934	if (id_priv->reuseaddr) {
3935		mutex_lock(&lock);
3936		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
3937		if (!ret)
3938			id_priv->reuseaddr = 0;
3939		mutex_unlock(&lock);
3940		if (ret)
3941			goto err;
3942	}
3943
3944	id_priv->backlog = backlog;
3945	if (id_priv->cma_dev) {
3946		if (rdma_cap_ib_cm(id->device, 1)) {
3947			ret = cma_ib_listen(id_priv);
3948			if (ret)
3949				goto err;
3950		} else if (rdma_cap_iw_cm(id->device, 1)) {
3951			ret = cma_iw_listen(id_priv, backlog);
3952			if (ret)
3953				goto err;
3954		} else {
3955			ret = -ENOSYS;
3956			goto err;
3957		}
3958	} else {
3959		ret = cma_listen_on_all(id_priv);
3960		if (ret)
3961			goto err;
3962	}
3963
3964	return 0;
3965err:
3966	id_priv->backlog = 0;
3967	/*
3968	 * None of the failure paths that lead here allow the req_handler to
3969	 * have run.
3970	 */
3971	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
3972	return ret;
3973}
3974EXPORT_SYMBOL(rdma_listen);
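/*
 * Example (illustrative sketch, not an in-tree user): minimal passive-side
 * setup.  my_cm_handler is a hypothetical rdma_cm_event_handler that will
 * later receive RDMA_CM_EVENT_CONNECT_REQUEST events; error unwinding is
 * abbreviated.
 *
 *	struct rdma_cm_id *listen_id;
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *		.sin_port = htons(7471),
 *	};
 *	int ret;
 *
 *	listen_id = rdma_create_id(&init_net, my_cm_handler, NULL,
 *				   RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(listen_id))
 *		return PTR_ERR(listen_id);
 *
 *	ret = rdma_bind_addr(listen_id, (struct sockaddr *)&sin);
 *	if (!ret)
 *		ret = rdma_listen(listen_id, 128);
 *	if (ret)
 *		rdma_destroy_id(listen_id);
 */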
3975
3976static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
3977			      struct sockaddr *addr, const struct sockaddr *daddr)
3978{
3979	struct sockaddr *id_daddr;
3980	int ret;
3981
3982	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
3983	    addr->sa_family != AF_IB)
3984		return -EAFNOSUPPORT;
3985
3986	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
3987		return -EINVAL;
3988
3989	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
3990	if (ret)
3991		goto err1;
3992
3993	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
3994	if (!cma_any_addr(addr)) {
3995		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
3996		if (ret)
3997			goto err1;
3998
3999		ret = cma_acquire_dev_by_src_ip(id_priv);
4000		if (ret)
4001			goto err1;
4002	}
4003
4004	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
4005		if (addr->sa_family == AF_INET)
4006			id_priv->afonly = 1;
4007#if IS_ENABLED(CONFIG_IPV6)
4008		else if (addr->sa_family == AF_INET6) {
4009			struct net *net = id_priv->id.route.addr.dev_addr.net;
4010
4011			id_priv->afonly = net->ipv6.sysctl.bindv6only;
4012		}
4013#endif
4014	}
4015	id_daddr = cma_dst_addr(id_priv);
4016	if (daddr != id_daddr)
4017		memcpy(id_daddr, daddr, rdma_addr_size(addr));
4018	id_daddr->sa_family = addr->sa_family;
4019
4020	ret = cma_get_port(id_priv);
4021	if (ret)
4022		goto err2;
4023
4024	if (!cma_any_addr(addr))
4025		rdma_restrack_add(&id_priv->res);
4026	return 0;
4027err2:
4028	if (id_priv->cma_dev)
4029		cma_release_dev(id_priv);
4030err1:
4031	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
4032	return ret;
4033}
4034
4035static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4036			 const struct sockaddr *dst_addr)
4037{
4038	struct rdma_id_private *id_priv =
4039		container_of(id, struct rdma_id_private, id);
4040	struct sockaddr_storage zero_sock = {};
4041
4042	if (src_addr && src_addr->sa_family)
4043		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
4044
4045	/*
4046	 * When src_addr is not specified, automatically supply a wildcard (any) address
4047	 */
4048	zero_sock.ss_family = dst_addr->sa_family;
4049	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
4050		struct sockaddr_in6 *src_addr6 =
4051			(struct sockaddr_in6 *)&zero_sock;
4052		struct sockaddr_in6 *dst_addr6 =
4053			(struct sockaddr_in6 *)dst_addr;
4054
4055		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
4056		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
4057			id->route.addr.dev_addr.bound_dev_if =
4058				dst_addr6->sin6_scope_id;
4059	} else if (dst_addr->sa_family == AF_IB) {
4060		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
4061			((struct sockaddr_ib *)dst_addr)->sib_pkey;
4062	}
4063	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
4064}
4065
4066/*
4067 * If required, resolve the source address for bind and leave the id_priv in
4068 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
4069 * calls made by the ULP; a previously bound ID will not be re-bound, and
4070 * src_addr is ignored.
4071 */
4072static int resolve_prepare_src(struct rdma_id_private *id_priv,
4073			       struct sockaddr *src_addr,
4074			       const struct sockaddr *dst_addr)
4075{
4076	int ret;
4077
4078	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
4079		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
4080		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
4081		if (ret)
4082			return ret;
4083		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
4084					   RDMA_CM_ADDR_QUERY)))
4085			return -EINVAL;
4086
4087	} else {
4088		memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
4089	}
4090
4091	if (cma_family(id_priv) != dst_addr->sa_family) {
4092		ret = -EINVAL;
4093		goto err_state;
4094	}
4095	return 0;
4096
4097err_state:
4098	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
4099	return ret;
4100}
4101
4102int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4103		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
4104{
4105	struct rdma_id_private *id_priv =
4106		container_of(id, struct rdma_id_private, id);
4107	int ret;
4108
4109	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
4110	if (ret)
4111		return ret;
4112
4113	if (cma_any_addr(dst_addr)) {
4114		ret = cma_resolve_loopback(id_priv);
4115	} else {
4116		if (dst_addr->sa_family == AF_IB) {
4117			ret = cma_resolve_ib_addr(id_priv);
4118		} else {
4119			/*
4120			 * The FSM can return to RDMA_CM_ADDR_BOUND after
4121			 * rdma_resolve_ip() is called, e.g. through the error
4122			 * path in addr_handler(). If this happens the existing
4123			 * request must be canceled before issuing a new one.
4124			 * Since canceling a request is a bit slow and this
4125			 * oddball path is rare, simply track whether a request
4126			 * has ever been issued. The flag is effectively
4127			 * permanent, since this is the only place a cancel is
4128			 * needed and it sits immediately before rdma_resolve_ip().
4129			 */
4130			if (id_priv->used_resolve_ip)
4131				rdma_addr_cancel(&id->route.addr.dev_addr);
4132			else
4133				id_priv->used_resolve_ip = 1;
4134			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
4135					      &id->route.addr.dev_addr,
4136					      timeout_ms, addr_handler,
4137					      false, id_priv);
4138		}
4139	}
4140	if (ret)
4141		goto err;
4142
4143	return 0;
4144err:
4145	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
4146	return ret;
4147}
4148EXPORT_SYMBOL(rdma_resolve_addr);
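/*
 * Example (illustrative sketch, not an in-tree user): the active side calls
 * rdma_resolve_addr() and continues from its event handler, typically
 * calling rdma_resolve_route() on RDMA_CM_EVENT_ADDR_RESOLVED.  Returning a
 * non-zero value from the handler makes the CM destroy the ID.  The handler
 * name, the destination "dst" and the 2000 ms timeouts are hypothetical.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ADDR_ERROR:
 *		case RDMA_CM_EVENT_ROUTE_ERROR:
 *			return -EHOSTUNREACH;
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
 */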
4149
4150int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
4151{
4152	struct rdma_id_private *id_priv =
4153		container_of(id, struct rdma_id_private, id);
4154
4155	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
4156}
4157EXPORT_SYMBOL(rdma_bind_addr);
4158
4159static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
4160{
4161	struct cma_hdr *cma_hdr;
4162
4163	cma_hdr = hdr;
4164	cma_hdr->cma_version = CMA_VERSION;
4165	if (cma_family(id_priv) == AF_INET) {
4166		struct sockaddr_in *src4, *dst4;
4167
4168		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
4169		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
4170
4171		cma_set_ip_ver(cma_hdr, 4);
4172		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
4173		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
4174		cma_hdr->port = src4->sin_port;
4175	} else if (cma_family(id_priv) == AF_INET6) {
4176		struct sockaddr_in6 *src6, *dst6;
4177
4178		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
4179		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
4180
4181		cma_set_ip_ver(cma_hdr, 6);
4182		cma_hdr->src_addr.ip6 = src6->sin6_addr;
4183		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
4184		cma_hdr->port = src6->sin6_port;
4185	}
4186	return 0;
4187}
4188
4189static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
4190				const struct ib_cm_event *ib_event)
4191{
4192	struct rdma_id_private *id_priv = cm_id->context;
4193	struct rdma_cm_event event = {};
4194	const struct ib_cm_sidr_rep_event_param *rep =
4195				&ib_event->param.sidr_rep_rcvd;
4196	int ret;
4197
4198	mutex_lock(&id_priv->handler_mutex);
4199	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4200		goto out;
4201
4202	switch (ib_event->event) {
4203	case IB_CM_SIDR_REQ_ERROR:
4204		event.event = RDMA_CM_EVENT_UNREACHABLE;
4205		event.status = -ETIMEDOUT;
4206		break;
4207	case IB_CM_SIDR_REP_RECEIVED:
4208		event.param.ud.private_data = ib_event->private_data;
4209		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
4210		if (rep->status != IB_SIDR_SUCCESS) {
4211			event.event = RDMA_CM_EVENT_UNREACHABLE;
4212			event.status = ib_event->param.sidr_rep_rcvd.status;
4213			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
4214					     event.status);
4215			break;
4216		}
4217		ret = cma_set_qkey(id_priv, rep->qkey);
4218		if (ret) {
4219			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
4220			event.event = RDMA_CM_EVENT_ADDR_ERROR;
4221			event.status = ret;
4222			break;
4223		}
4224		ib_init_ah_attr_from_path(id_priv->id.device,
4225					  id_priv->id.port_num,
4226					  id_priv->id.route.path_rec,
4227					  &event.param.ud.ah_attr,
4228					  rep->sgid_attr);
4229		event.param.ud.qp_num = rep->qpn;
4230		event.param.ud.qkey = rep->qkey;
4231		event.event = RDMA_CM_EVENT_ESTABLISHED;
4232		event.status = 0;
4233		break;
4234	default:
4235		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
4236		       ib_event->event);
4237		goto out;
4238	}
4239
4240	ret = cma_cm_event_handler(id_priv, &event);
4241
4242	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4243	if (ret) {
4244		/* Destroy the CM ID by returning a non-zero value. */
4245		id_priv->cm_id.ib = NULL;
4246		destroy_id_handler_unlock(id_priv);
4247		return ret;
4248	}
4249out:
4250	mutex_unlock(&id_priv->handler_mutex);
4251	return 0;
4252}
4253
4254static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
4255			      struct rdma_conn_param *conn_param)
4256{
4257	struct ib_cm_sidr_req_param req;
4258	struct ib_cm_id	*id;
4259	void *private_data;
4260	u8 offset;
4261	int ret;
4262
4263	memset(&req, 0, sizeof req);
4264	offset = cma_user_data_offset(id_priv);
4265	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
4266		return -EINVAL;
4267
4268	if (req.private_data_len) {
4269		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4270		if (!private_data)
4271			return -ENOMEM;
4272	} else {
4273		private_data = NULL;
4274	}
4275
4276	if (conn_param->private_data && conn_param->private_data_len)
4277		memcpy(private_data + offset, conn_param->private_data,
4278		       conn_param->private_data_len);
4279
4280	if (private_data) {
4281		ret = cma_format_hdr(private_data, id_priv);
4282		if (ret)
4283			goto out;
4284		req.private_data = private_data;
4285	}
4286
4287	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
4288			     id_priv);
4289	if (IS_ERR(id)) {
4290		ret = PTR_ERR(id);
4291		goto out;
4292	}
4293	id_priv->cm_id.ib = id;
4294
4295	req.path = id_priv->id.route.path_rec;
4296	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4297	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4298	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
4299	req.max_cm_retries = CMA_MAX_CM_RETRIES;
4300
4301	trace_cm_send_sidr_req(id_priv);
4302	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
4303	if (ret) {
4304		ib_destroy_cm_id(id_priv->cm_id.ib);
4305		id_priv->cm_id.ib = NULL;
4306	}
4307out:
4308	kfree(private_data);
4309	return ret;
4310}
4311
4312static int cma_connect_ib(struct rdma_id_private *id_priv,
4313			  struct rdma_conn_param *conn_param)
4314{
4315	struct ib_cm_req_param req;
4316	struct rdma_route *route;
4317	void *private_data;
4318	struct ib_cm_id	*id;
4319	u8 offset;
4320	int ret;
4321
4322	memset(&req, 0, sizeof req);
4323	offset = cma_user_data_offset(id_priv);
4324	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
4325		return -EINVAL;
4326
4327	if (req.private_data_len) {
4328		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4329		if (!private_data)
4330			return -ENOMEM;
4331	} else {
4332		private_data = NULL;
4333	}
4334
4335	if (conn_param->private_data && conn_param->private_data_len)
4336		memcpy(private_data + offset, conn_param->private_data,
4337		       conn_param->private_data_len);
4338
4339	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
4340	if (IS_ERR(id)) {
4341		ret = PTR_ERR(id);
4342		goto out;
4343	}
4344	id_priv->cm_id.ib = id;
4345
4346	route = &id_priv->id.route;
4347	if (private_data) {
4348		ret = cma_format_hdr(private_data, id_priv);
4349		if (ret)
4350			goto out;
4351		req.private_data = private_data;
4352	}
4353
4354	req.primary_path = &route->path_rec[0];
4355	req.primary_path_inbound = route->path_rec_inbound;
4356	req.primary_path_outbound = route->path_rec_outbound;
4357	if (route->num_pri_alt_paths == 2)
4358		req.alternate_path = &route->path_rec[1];
4359
4360	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4361	/* Alternate path SGID attribute currently unsupported */
4362	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4363	req.qp_num = id_priv->qp_num;
4364	req.qp_type = id_priv->id.qp_type;
4365	req.starting_psn = id_priv->seq_num;
4366	req.responder_resources = conn_param->responder_resources;
4367	req.initiator_depth = conn_param->initiator_depth;
4368	req.flow_control = conn_param->flow_control;
4369	req.retry_count = min_t(u8, 7, conn_param->retry_count);
4370	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4371	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4372	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4373	req.max_cm_retries = CMA_MAX_CM_RETRIES;
4374	req.srq = id_priv->srq ? 1 : 0;
4375	req.ece.vendor_id = id_priv->ece.vendor_id;
4376	req.ece.attr_mod = id_priv->ece.attr_mod;
4377
4378	trace_cm_send_req(id_priv);
4379	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
4380out:
4381	if (ret && !IS_ERR(id)) {
4382		ib_destroy_cm_id(id);
4383		id_priv->cm_id.ib = NULL;
4384	}
4385
4386	kfree(private_data);
4387	return ret;
4388}
4389
4390static int cma_connect_iw(struct rdma_id_private *id_priv,
4391			  struct rdma_conn_param *conn_param)
4392{
4393	struct iw_cm_id *cm_id;
4394	int ret;
4395	struct iw_cm_conn_param iw_param;
4396
4397	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
4398	if (IS_ERR(cm_id))
4399		return PTR_ERR(cm_id);
4400
4401	mutex_lock(&id_priv->qp_mutex);
4402	cm_id->tos = id_priv->tos;
4403	cm_id->tos_set = id_priv->tos_set;
4404	mutex_unlock(&id_priv->qp_mutex);
4405
4406	id_priv->cm_id.iw = cm_id;
4407
4408	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
4409	       rdma_addr_size(cma_src_addr(id_priv)));
4410	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
4411	       rdma_addr_size(cma_dst_addr(id_priv)));
4412
4413	ret = cma_modify_qp_rtr(id_priv, conn_param);
4414	if (ret)
4415		goto out;
4416
4417	if (conn_param) {
4418		iw_param.ord = conn_param->initiator_depth;
4419		iw_param.ird = conn_param->responder_resources;
4420		iw_param.private_data = conn_param->private_data;
4421		iw_param.private_data_len = conn_param->private_data_len;
4422		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
4423	} else {
4424		memset(&iw_param, 0, sizeof iw_param);
4425		iw_param.qpn = id_priv->qp_num;
4426	}
4427	ret = iw_cm_connect(cm_id, &iw_param);
4428out:
4429	if (ret) {
4430		iw_destroy_cm_id(cm_id);
4431		id_priv->cm_id.iw = NULL;
4432	}
4433	return ret;
4434}
4435
4436/**
4437 * rdma_connect_locked - Initiate an active connection request.
4438 * @id: Connection identifier to connect.
4439 * @conn_param: Connection information used for connected QPs.
4440 *
4441 * Same as rdma_connect() but can only be called from the
4442 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
4443 */
4444int rdma_connect_locked(struct rdma_cm_id *id,
4445			struct rdma_conn_param *conn_param)
4446{
4447	struct rdma_id_private *id_priv =
4448		container_of(id, struct rdma_id_private, id);
4449	int ret;
4450
4451	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
4452		return -EINVAL;
4453
4454	if (!id->qp) {
4455		id_priv->qp_num = conn_param->qp_num;
4456		id_priv->srq = conn_param->srq;
4457	}
4458
4459	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4460		if (id->qp_type == IB_QPT_UD)
4461			ret = cma_resolve_ib_udp(id_priv, conn_param);
4462		else
4463			ret = cma_connect_ib(id_priv, conn_param);
4464	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4465		ret = cma_connect_iw(id_priv, conn_param);
4466	} else {
4467		ret = -ENOSYS;
4468	}
4469	if (ret)
4470		goto err_state;
4471	return 0;
4472err_state:
4473	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
4474	return ret;
4475}
4476EXPORT_SYMBOL(rdma_connect_locked);
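/*
 * Example (illustrative sketch, not an in-tree user): a kernel ULP that
 * connects from its RDMA_CM_EVENT_ROUTE_RESOLVED callback already holds the
 * handler_mutex and therefore must use rdma_connect_locked() rather than
 * rdma_connect().  The conn_param values are hypothetical and QP creation
 * is elided.
 *
 *	case RDMA_CM_EVENT_ROUTE_RESOLVED: {
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *			.retry_count = 7,
 *			.rnr_retry_count = 7,
 *		};
 *
 *		return rdma_connect_locked(id, &param);
 *	}
 */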
4477
4478/**
4479 * rdma_connect - Initiate an active connection request.
4480 * @id: Connection identifier to connect.
4481 * @conn_param: Connection information used for connected QPs.
4482 *
4483 * Users must have resolved a route for the rdma_cm_id to connect with by having
4484 * called rdma_resolve_route before calling this routine.
4485 *
4486 * This call will either connect to a remote QP or obtain remote QP information
4487 * for unconnected rdma_cm_id's.  The actual operation is based on the
4488 * rdma_cm_id's port space.
4489 */
4490int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4491{
4492	struct rdma_id_private *id_priv =
4493		container_of(id, struct rdma_id_private, id);
4494	int ret;
4495
4496	mutex_lock(&id_priv->handler_mutex);
4497	ret = rdma_connect_locked(id, conn_param);
4498	mutex_unlock(&id_priv->handler_mutex);
4499	return ret;
4500}
4501EXPORT_SYMBOL(rdma_connect);
4502
4503/**
4504 * rdma_connect_ece - Initiate an active connection request with ECE data.
4505 * @id: Connection identifier to connect.
4506 * @conn_param: Connection information used for connected QPs.
4507 * @ece: ECE parameters
4508 *
4509 * See rdma_connect() explanation.
4510 */
4511int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4512		     struct rdma_ucm_ece *ece)
4513{
4514	struct rdma_id_private *id_priv =
4515		container_of(id, struct rdma_id_private, id);
4516
4517	id_priv->ece.vendor_id = ece->vendor_id;
4518	id_priv->ece.attr_mod = ece->attr_mod;
4519
4520	return rdma_connect(id, conn_param);
4521}
4522EXPORT_SYMBOL(rdma_connect_ece);
4523
4524static int cma_accept_ib(struct rdma_id_private *id_priv,
4525			 struct rdma_conn_param *conn_param)
4526{
4527	struct ib_cm_rep_param rep;
4528	int ret;
4529
4530	ret = cma_modify_qp_rtr(id_priv, conn_param);
4531	if (ret)
4532		goto out;
4533
4534	ret = cma_modify_qp_rts(id_priv, conn_param);
4535	if (ret)
4536		goto out;
4537
4538	memset(&rep, 0, sizeof rep);
4539	rep.qp_num = id_priv->qp_num;
4540	rep.starting_psn = id_priv->seq_num;
4541	rep.private_data = conn_param->private_data;
4542	rep.private_data_len = conn_param->private_data_len;
4543	rep.responder_resources = conn_param->responder_resources;
4544	rep.initiator_depth = conn_param->initiator_depth;
4545	rep.failover_accepted = 0;
4546	rep.flow_control = conn_param->flow_control;
4547	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4548	rep.srq = id_priv->srq ? 1 : 0;
4549	rep.ece.vendor_id = id_priv->ece.vendor_id;
4550	rep.ece.attr_mod = id_priv->ece.attr_mod;
4551
4552	trace_cm_send_rep(id_priv);
4553	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
4554out:
4555	return ret;
4556}
4557
4558static int cma_accept_iw(struct rdma_id_private *id_priv,
4559		  struct rdma_conn_param *conn_param)
4560{
4561	struct iw_cm_conn_param iw_param;
4562	int ret;
4563
4564	if (!conn_param)
4565		return -EINVAL;
4566
4567	ret = cma_modify_qp_rtr(id_priv, conn_param);
4568	if (ret)
4569		return ret;
4570
4571	iw_param.ord = conn_param->initiator_depth;
4572	iw_param.ird = conn_param->responder_resources;
4573	iw_param.private_data = conn_param->private_data;
4574	iw_param.private_data_len = conn_param->private_data_len;
4575	if (id_priv->id.qp)
4576		iw_param.qpn = id_priv->qp_num;
4577	else
4578		iw_param.qpn = conn_param->qp_num;
4579
4580	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
4581}
4582
4583static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
4584			     enum ib_cm_sidr_status status, u32 qkey,
4585			     const void *private_data, int private_data_len)
4586{
4587	struct ib_cm_sidr_rep_param rep;
4588	int ret;
4589
4590	memset(&rep, 0, sizeof rep);
4591	rep.status = status;
4592	if (status == IB_SIDR_SUCCESS) {
4593		if (qkey)
4594			ret = cma_set_qkey(id_priv, qkey);
4595		else
4596			ret = cma_set_default_qkey(id_priv);
4597		if (ret)
4598			return ret;
4599		rep.qp_num = id_priv->qp_num;
4600		rep.qkey = id_priv->qkey;
4601
4602		rep.ece.vendor_id = id_priv->ece.vendor_id;
4603		rep.ece.attr_mod = id_priv->ece.attr_mod;
4604	}
4605
4606	rep.private_data = private_data;
4607	rep.private_data_len = private_data_len;
4608
4609	trace_cm_send_sidr_rep(id_priv);
4610	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
4611}
4612
4613/**
4614 * rdma_accept - Called to accept a connection request or response.
4615 * @id: Connection identifier associated with the request.
4616 * @conn_param: Information needed to establish the connection.  This must be
4617 *   provided if accepting a connection request.  If accepting a connection
4618 *   response, this parameter must be NULL.
4619 *
4620 * Typically, this routine is only called by the listener to accept a connection
4621 * request.  It must also be called on the active side of a connection if the
4622 * user is performing their own QP transitions.
4623 *
4624 * In the case of error, a reject message is sent to the remote side and the
4625 * state of the qp associated with the id is modified to error, such that any
4626 * previously posted receive buffers would be flushed.
4627 *
4628 * This function is for use by kernel ULPs and must be called from under the
4629 * handler callback.
4630 */
4631int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4632{
4633	struct rdma_id_private *id_priv =
4634		container_of(id, struct rdma_id_private, id);
4635	int ret;
4636
4637	lockdep_assert_held(&id_priv->handler_mutex);
4638
4639	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4640		return -EINVAL;
4641
4642	if (!id->qp && conn_param) {
4643		id_priv->qp_num = conn_param->qp_num;
4644		id_priv->srq = conn_param->srq;
4645	}
4646
4647	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4648		if (id->qp_type == IB_QPT_UD) {
4649			if (conn_param)
4650				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4651							conn_param->qkey,
4652							conn_param->private_data,
4653							conn_param->private_data_len);
4654			else
4655				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4656							0, NULL, 0);
4657		} else {
4658			if (conn_param)
4659				ret = cma_accept_ib(id_priv, conn_param);
4660			else
4661				ret = cma_rep_recv(id_priv);
4662		}
4663	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4664		ret = cma_accept_iw(id_priv, conn_param);
4665	} else {
4666		ret = -ENOSYS;
4667	}
4668	if (ret)
4669		goto reject;
4670
4671	return 0;
4672reject:
4673	cma_modify_qp_err(id_priv);
4674	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
4675	return ret;
4676}
4677EXPORT_SYMBOL(rdma_accept);
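/*
 * Example (illustrative sketch, not an in-tree user): a listener usually
 * accepts directly from its RDMA_CM_EVENT_CONNECT_REQUEST callback, where
 * the handler_mutex is already held as rdma_accept() requires.  If the
 * accept is deferred to another context, it must be bracketed by
 * rdma_lock_handler()/rdma_unlock_handler().  Returning non-zero from the
 * callback rejects and destroys the new child ID.  The pd and qp_init_attr
 * names are hypothetical.
 *
 *	case RDMA_CM_EVENT_CONNECT_REQUEST: {
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *			.rnr_retry_count = 7,
 *		};
 *		int ret;
 *
 *		ret = rdma_create_qp(id, pd, &qp_init_attr);
 *		if (ret)
 *			return ret;
 *		return rdma_accept(id, &param);
 *	}
 */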
4678
4679int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4680		    struct rdma_ucm_ece *ece)
4681{
4682	struct rdma_id_private *id_priv =
4683		container_of(id, struct rdma_id_private, id);
4684
4685	id_priv->ece.vendor_id = ece->vendor_id;
4686	id_priv->ece.attr_mod = ece->attr_mod;
4687
4688	return rdma_accept(id, conn_param);
4689}
4690EXPORT_SYMBOL(rdma_accept_ece);
4691
4692void rdma_lock_handler(struct rdma_cm_id *id)
4693{
4694	struct rdma_id_private *id_priv =
4695		container_of(id, struct rdma_id_private, id);
4696
4697	mutex_lock(&id_priv->handler_mutex);
4698}
4699EXPORT_SYMBOL(rdma_lock_handler);
4700
4701void rdma_unlock_handler(struct rdma_cm_id *id)
4702{
4703	struct rdma_id_private *id_priv =
4704		container_of(id, struct rdma_id_private, id);
4705
4706	mutex_unlock(&id_priv->handler_mutex);
4707}
4708EXPORT_SYMBOL(rdma_unlock_handler);
4709
4710int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
4711{
4712	struct rdma_id_private *id_priv;
4713	int ret;
4714
4715	id_priv = container_of(id, struct rdma_id_private, id);
4716	if (!id_priv->cm_id.ib)
4717		return -EINVAL;
4718
4719	switch (id->device->node_type) {
4720	case RDMA_NODE_IB_CA:
4721		ret = ib_cm_notify(id_priv->cm_id.ib, event);
4722		break;
4723	default:
4724		ret = 0;
4725		break;
4726	}
4727	return ret;
4728}
4729EXPORT_SYMBOL(rdma_notify);
4730
4731int rdma_reject(struct rdma_cm_id *id, const void *private_data,
4732		u8 private_data_len, u8 reason)
4733{
4734	struct rdma_id_private *id_priv;
4735	int ret;
4736
4737	id_priv = container_of(id, struct rdma_id_private, id);
4738	if (!id_priv->cm_id.ib)
4739		return -EINVAL;
4740
4741	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4742		if (id->qp_type == IB_QPT_UD) {
4743			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
4744						private_data, private_data_len);
4745		} else {
4746			trace_cm_send_rej(id_priv);
4747			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
4748					     private_data, private_data_len);
4749		}
4750	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4751		ret = iw_cm_reject(id_priv->cm_id.iw,
4752				   private_data, private_data_len);
4753	} else {
4754		ret = -ENOSYS;
4755	}
4756
4757	return ret;
4758}
4759EXPORT_SYMBOL(rdma_reject);
4760
4761int rdma_disconnect(struct rdma_cm_id *id)
4762{
4763	struct rdma_id_private *id_priv;
4764	int ret;
4765
4766	id_priv = container_of(id, struct rdma_id_private, id);
4767	if (!id_priv->cm_id.ib)
4768		return -EINVAL;
4769
4770	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4771		ret = cma_modify_qp_err(id_priv);
4772		if (ret)
4773			goto out;
4774		/* Initiate or respond to a disconnect. */
4775		trace_cm_disconnect(id_priv);
4776		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
4777			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
4778				trace_cm_sent_drep(id_priv);
4779		} else {
4780			trace_cm_sent_dreq(id_priv);
4781		}
4782	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4783		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
4784	} else
4785		ret = -EINVAL;
4786
4787out:
4788	return ret;
4789}
4790EXPORT_SYMBOL(rdma_disconnect);
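/*
 * Example (illustrative sketch, not an in-tree user): typical teardown order
 * for a connected ID with a CM-created QP.  A peer-initiated disconnect is
 * delivered to the handler as RDMA_CM_EVENT_DISCONNECTED; the calls below
 * are made from process context outside the event handler.
 *
 *	rdma_disconnect(id);
 *	ib_drain_qp(id->qp);
 *	rdma_destroy_qp(id);
 *	rdma_destroy_id(id);
 */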
4791
4792static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
4793			      struct ib_sa_multicast *multicast,
4794			      struct rdma_cm_event *event,
4795			      struct cma_multicast *mc)
4796{
4797	struct rdma_dev_addr *dev_addr;
4798	enum ib_gid_type gid_type;
4799	struct net_device *ndev;
4800
4801	if (status)
4802		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
4803				     status);
4804
4805	event->status = status;
4806	event->param.ud.private_data = mc->context;
4807	if (status) {
4808		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4809		return;
4810	}
4811
4812	dev_addr = &id_priv->id.route.addr.dev_addr;
4813	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4814	gid_type =
4815		id_priv->cma_dev
4816			->default_gid_type[id_priv->id.port_num -
4817					   rdma_start_port(
4818						   id_priv->cma_dev->device)];
4819
4820	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
4821	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
4822				     &multicast->rec, ndev, gid_type,
4823				     &event->param.ud.ah_attr)) {
4824		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4825		goto out;
4826	}
4827
4828	event->param.ud.qp_num = 0xFFFFFF;
4829	event->param.ud.qkey = id_priv->qkey;
4830
4831out:
4832	dev_put(ndev);
4833}
4834
4835static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
4836{
4837	struct cma_multicast *mc = multicast->context;
4838	struct rdma_id_private *id_priv = mc->id_priv;
4839	struct rdma_cm_event event = {};
4840	int ret = 0;
4841
4842	mutex_lock(&id_priv->handler_mutex);
4843	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
4844	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
4845		goto out;
4846
4847	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
4848	if (!ret) {
4849		cma_make_mc_event(status, id_priv, multicast, &event, mc);
4850		ret = cma_cm_event_handler(id_priv, &event);
4851	}
4852	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4853	WARN_ON(ret);
4854
4855out:
4856	mutex_unlock(&id_priv->handler_mutex);
4857	return 0;
4858}
4859
4860static void cma_set_mgid(struct rdma_id_private *id_priv,
4861			 struct sockaddr *addr, union ib_gid *mgid)
4862{
4863	unsigned char mc_map[MAX_ADDR_LEN];
4864	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4865	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
4866	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
4867
4868	if (cma_any_addr(addr)) {
4869		memset(mgid, 0, sizeof *mgid);
4870	} else if ((addr->sa_family == AF_INET6) &&
4871		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
4872								 0xFF10A01B)) {
4873		/* IPv6 address is an SA-assigned MGID. */
4874		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4875	} else if (addr->sa_family == AF_IB) {
4876		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
4877	} else if (addr->sa_family == AF_INET6) {
4878		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
4879		if (id_priv->id.ps == RDMA_PS_UDP)
4880			mc_map[7] = 0x01;	/* Use RDMA CM signature */
4881		*mgid = *(union ib_gid *) (mc_map + 4);
4882	} else {
4883		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
4884		if (id_priv->id.ps == RDMA_PS_UDP)
4885			mc_map[7] = 0x01;	/* Use RDMA CM signature */
4886		*mgid = *(union ib_gid *) (mc_map + 4);
4887	}
4888}
4889
4890static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
4891				 struct cma_multicast *mc)
4892{
4893	struct ib_sa_mcmember_rec rec;
4894	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4895	ib_sa_comp_mask comp_mask;
4896	int ret;
4897
4898	ib_addr_get_mgid(dev_addr, &rec.mgid);
4899	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4900				     &rec.mgid, &rec);
4901	if (ret)
4902		return ret;
4903
4904	if (!id_priv->qkey) {
4905		ret = cma_set_default_qkey(id_priv);
4906		if (ret)
4907			return ret;
4908	}
4909
4910	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
4911	rec.qkey = cpu_to_be32(id_priv->qkey);
4912	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
4913	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
4914	rec.join_state = mc->join_state;
4915
4916	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
4917		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
4918		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
4919		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
4920		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
4921
4922	if (id_priv->id.ps == RDMA_PS_IPOIB)
4923		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
4924			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
4925			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
4926			     IB_SA_MCMEMBER_REC_MTU |
4927			     IB_SA_MCMEMBER_REC_HOP_LIMIT;
4928
4929	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4930					 id_priv->id.port_num, &rec, comp_mask,
4931					 GFP_KERNEL, cma_ib_mc_handler, mc);
4932	return PTR_ERR_OR_ZERO(mc->sa_mc);
4933}
4934
4935static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
4936			      enum ib_gid_type gid_type)
4937{
4938	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
4939	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
4940
4941	if (cma_any_addr(addr)) {
4942		memset(mgid, 0, sizeof *mgid);
4943	} else if (addr->sa_family == AF_INET6) {
4944		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4945	} else {
4946		mgid->raw[0] =
4947			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
4948		mgid->raw[1] =
4949			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
4950		mgid->raw[2] = 0;
4951		mgid->raw[3] = 0;
4952		mgid->raw[4] = 0;
4953		mgid->raw[5] = 0;
4954		mgid->raw[6] = 0;
4955		mgid->raw[7] = 0;
4956		mgid->raw[8] = 0;
4957		mgid->raw[9] = 0;
4958		mgid->raw[10] = 0xff;
4959		mgid->raw[11] = 0xff;
4960		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
4961	}
4962}
4963
4964static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
4965				   struct cma_multicast *mc)
4966{
4967	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4968	int err = 0;
4969	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
4970	struct net_device *ndev = NULL;
4971	struct ib_sa_multicast ib = {};
4972	enum ib_gid_type gid_type;
4973	bool send_only;
4974
4975	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
4976
4977	if (cma_zero_addr(addr))
4978		return -EINVAL;
4979
4980	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4981		   rdma_start_port(id_priv->cma_dev->device)];
4982	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
4983
4984	ib.rec.pkey = cpu_to_be16(0xffff);
4985	if (dev_addr->bound_dev_if)
4986		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4987	if (!ndev)
4988		return -ENODEV;
4989
4990	ib.rec.rate = IB_RATE_PORT_CURRENT;
4991	ib.rec.hop_limit = 1;
4992	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
4993
4994	if (addr->sa_family == AF_INET) {
4995		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
4996			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
4997			if (!send_only) {
4998				err = cma_igmp_send(ndev, &ib.rec.mgid,
4999						    true);
5000			}
5001		}
5002	} else {
5003		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
5004			err = -ENOTSUPP;
5005	}
5006	dev_put(ndev);
5007	if (err || !ib.rec.mtu)
5008		return err ?: -EINVAL;
5009
5010	if (!id_priv->qkey)
5011		cma_set_default_qkey(id_priv);
5012
5013	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
5014		    &ib.rec.port_gid);
5015	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
5016	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
5017	queue_work(cma_wq, &mc->iboe_join.work);
5018	return 0;
5019}
5020
5021int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
5022			u8 join_state, void *context)
5023{
5024	struct rdma_id_private *id_priv =
5025		container_of(id, struct rdma_id_private, id);
5026	struct cma_multicast *mc;
5027	int ret;
5028
5029	/* Not supported for kernel QPs */
5030	if (WARN_ON(id->qp))
5031		return -EINVAL;
5032
5033	/* ULP is calling this wrong. */
5034	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
5035			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
5036		return -EINVAL;
5037
5038	if (id_priv->id.qp_type != IB_QPT_UD)
5039		return -EINVAL;
5040
5041	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
5042	if (!mc)
5043		return -ENOMEM;
5044
5045	memcpy(&mc->addr, addr, rdma_addr_size(addr));
5046	mc->context = context;
5047	mc->id_priv = id_priv;
5048	mc->join_state = join_state;
5049
5050	if (rdma_protocol_roce(id->device, id->port_num)) {
5051		ret = cma_iboe_join_multicast(id_priv, mc);
5052		if (ret)
5053			goto out_err;
5054	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
5055		ret = cma_join_ib_multicast(id_priv, mc);
5056		if (ret)
5057			goto out_err;
5058	} else {
5059		ret = -ENOSYS;
5060		goto out_err;
5061	}
5062
5063	spin_lock(&id_priv->lock);
5064	list_add(&mc->list, &id_priv->mc_list);
5065	spin_unlock(&id_priv->lock);
5066
5067	return 0;
5068out_err:
5069	kfree(mc);
5070	return ret;
5071}
5072EXPORT_SYMBOL(rdma_join_multicast);
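/*
 * Example (illustrative sketch, not an in-tree user): rdma_join_multicast()
 * is only valid on a UD ID whose address has been bound or resolved and
 * that has no kernel QP attached.  The join completes asynchronously with
 * RDMA_CM_EVENT_MULTICAST_JOIN (or _ERROR).  The group 239.1.2.3, the port
 * and my_context are hypothetical; BIT(FULLMEMBER_JOIN) requests a
 * full-member join as done by the ucma layer.
 *
 *	struct sockaddr_in maddr = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(0xe0010203),
 *		.sin_port = htons(7471),
 *	};
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&maddr,
 *				  BIT(FULLMEMBER_JOIN), my_context);
 *
 *	rdma_leave_multicast(id, (struct sockaddr *)&maddr);
 */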
5073
5074void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
5075{
5076	struct rdma_id_private *id_priv;
5077	struct cma_multicast *mc;
5078
5079	id_priv = container_of(id, struct rdma_id_private, id);
5080	spin_lock_irq(&id_priv->lock);
5081	list_for_each_entry(mc, &id_priv->mc_list, list) {
5082		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
5083			continue;
5084		list_del(&mc->list);
5085		spin_unlock_irq(&id_priv->lock);
5086
5087		WARN_ON(id_priv->cma_dev->device != id->device);
5088		destroy_mc(id_priv, mc);
5089		return;
5090	}
5091	spin_unlock_irq(&id_priv->lock);
5092}
5093EXPORT_SYMBOL(rdma_leave_multicast);
5094
5095static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
5096{
5097	struct rdma_dev_addr *dev_addr;
5098	struct cma_work *work;
5099
5100	dev_addr = &id_priv->id.route.addr.dev_addr;
5101
5102	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
5103	    (net_eq(dev_net(ndev), dev_addr->net)) &&
5104	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
5105		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
5106			ndev->name, &id_priv->id);
5107		work = kzalloc(sizeof *work, GFP_KERNEL);
5108		if (!work)
5109			return -ENOMEM;
5110
5111		INIT_WORK(&work->work, cma_work_handler);
5112		work->id = id_priv;
5113		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
5114		cma_id_get(id_priv);
5115		queue_work(cma_wq, &work->work);
5116	}
5117
5118	return 0;
5119}
5120
5121static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
5122			       void *ptr)
5123{
5124	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5125	struct cma_device *cma_dev;
5126	struct rdma_id_private *id_priv;
5127	int ret = NOTIFY_DONE;
5128
5129	if (event != NETDEV_BONDING_FAILOVER)
5130		return NOTIFY_DONE;
5131
5132	if (!netif_is_bond_master(ndev))
5133		return NOTIFY_DONE;
5134
5135	mutex_lock(&lock);
5136	list_for_each_entry(cma_dev, &dev_list, list)
5137		list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
5138			ret = cma_netdev_change(ndev, id_priv);
5139			if (ret)
5140				goto out;
5141		}
5142
5143out:
5144	mutex_unlock(&lock);
5145	return ret;
5146}
5147
5148static void cma_netevent_work_handler(struct work_struct *_work)
5149{
5150	struct rdma_id_private *id_priv =
5151		container_of(_work, struct rdma_id_private, id.net_work);
5152	struct rdma_cm_event event = {};
5153
5154	mutex_lock(&id_priv->handler_mutex);
5155
5156	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
5157	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
5158		goto out_unlock;
5159
5160	event.event = RDMA_CM_EVENT_UNREACHABLE;
5161	event.status = -ETIMEDOUT;
5162
5163	if (cma_cm_event_handler(id_priv, &event)) {
5164		__acquire(&id_priv->handler_mutex);
5165		id_priv->cm_id.ib = NULL;
5166		cma_id_put(id_priv);
5167		destroy_id_handler_unlock(id_priv);
5168		return;
5169	}
5170
5171out_unlock:
5172	mutex_unlock(&id_priv->handler_mutex);
5173	cma_id_put(id_priv);
5174}
5175
5176static int cma_netevent_callback(struct notifier_block *self,
5177				 unsigned long event, void *ctx)
5178{
5179	struct id_table_entry *ips_node = NULL;
5180	struct rdma_id_private *current_id;
5181	struct neighbour *neigh = ctx;
5182	unsigned long flags;
5183
5184	if (event != NETEVENT_NEIGH_UPDATE)
5185		return NOTIFY_DONE;
5186
5187	spin_lock_irqsave(&id_table_lock, flags);
5188	if (neigh->tbl->family == AF_INET6) {
5189		struct sockaddr_in6 neigh_sock_6;
5190
5191		neigh_sock_6.sin6_family = AF_INET6;
5192		neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
5193		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5194					     (struct sockaddr *)&neigh_sock_6);
5195	} else if (neigh->tbl->family == AF_INET) {
5196		struct sockaddr_in neigh_sock_4;
5197
5198		neigh_sock_4.sin_family = AF_INET;
5199		neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
5200		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5201					     (struct sockaddr *)&neigh_sock_4);
5202	} else
5203		goto out;
5204
5205	if (!ips_node)
5206		goto out;
5207
5208	list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
5209		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
5210			   neigh->ha, ETH_ALEN))
5211			continue;
5212		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
5213		cma_id_get(current_id);
5214		queue_work(cma_wq, &current_id->id.net_work);
5215	}
5216out:
5217	spin_unlock_irqrestore(&id_table_lock, flags);
5218	return NOTIFY_DONE;
5219}
5220
5221static struct notifier_block cma_nb = {
5222	.notifier_call = cma_netdev_callback
5223};
5224
5225static struct notifier_block cma_netevent_cb = {
5226	.notifier_call = cma_netevent_callback
5227};
5228
5229static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
5230{
5231	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
5232	enum rdma_cm_state state;
5233	unsigned long flags;
5234
5235	mutex_lock(&id_priv->handler_mutex);
5236	/* Record that we want to remove the device */
5237	spin_lock_irqsave(&id_priv->lock, flags);
5238	state = id_priv->state;
5239	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
5240		spin_unlock_irqrestore(&id_priv->lock, flags);
5241		mutex_unlock(&id_priv->handler_mutex);
5242		cma_id_put(id_priv);
5243		return;
5244	}
5245	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
5246	spin_unlock_irqrestore(&id_priv->lock, flags);
5247
5248	if (cma_cm_event_handler(id_priv, &event)) {
5249		/*
5250		 * At this point the ULP promises it won't call
5251		 * rdma_destroy_id() concurrently
5252		 */
5253		cma_id_put(id_priv);
5254		mutex_unlock(&id_priv->handler_mutex);
5255		trace_cm_id_destroy(id_priv);
5256		_destroy_id(id_priv, state);
5257		return;
5258	}
5259	mutex_unlock(&id_priv->handler_mutex);
5260
5261	/*
5262	 * If this races with destroy then the thread that first moves the
5263	 * state to destroying performs the cancel.
5264	 */
5265	cma_cancel_operation(id_priv, state);
5266	cma_id_put(id_priv);
5267}
5268
5269static void cma_process_remove(struct cma_device *cma_dev)
5270{
5271	mutex_lock(&lock);
5272	while (!list_empty(&cma_dev->id_list)) {
5273		struct rdma_id_private *id_priv = list_first_entry(
5274			&cma_dev->id_list, struct rdma_id_private, device_item);
5275
5276		list_del_init(&id_priv->listen_item);
5277		list_del_init(&id_priv->device_item);
5278		cma_id_get(id_priv);
5279		mutex_unlock(&lock);
5280
5281		cma_send_device_removal_put(id_priv);
5282
5283		mutex_lock(&lock);
5284	}
5285	mutex_unlock(&lock);
5286
5287	cma_dev_put(cma_dev);
5288	wait_for_completion(&cma_dev->comp);
5289}
5290
5291static bool cma_supported(struct ib_device *device)
5292{
5293	u32 i;
5294
5295	rdma_for_each_port(device, i) {
5296		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
5297			return true;
5298	}
5299	return false;
5300}
5301
5302static int cma_add_one(struct ib_device *device)
5303{
5304	struct rdma_id_private *to_destroy;
5305	struct cma_device *cma_dev;
5306	struct rdma_id_private *id_priv;
5307	unsigned long supported_gids = 0;
5308	int ret;
5309	u32 i;
5310
5311	if (!cma_supported(device))
5312		return -EOPNOTSUPP;
5313
5314	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
5315	if (!cma_dev)
5316		return -ENOMEM;
5317
5318	cma_dev->device = device;
5319	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
5320					    sizeof(*cma_dev->default_gid_type),
5321					    GFP_KERNEL);
5322	if (!cma_dev->default_gid_type) {
5323		ret = -ENOMEM;
5324		goto free_cma_dev;
5325	}
5326
5327	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
5328					    sizeof(*cma_dev->default_roce_tos),
5329					    GFP_KERNEL);
5330	if (!cma_dev->default_roce_tos) {
5331		ret = -ENOMEM;
5332		goto free_gid_type;
5333	}
5334
5335	rdma_for_each_port (device, i) {
5336		supported_gids = roce_gid_type_mask_support(device, i);
5337		WARN_ON(!supported_gids);
5338		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
5339			cma_dev->default_gid_type[i - rdma_start_port(device)] =
5340				CMA_PREFERRED_ROCE_GID_TYPE;
5341		else
5342			cma_dev->default_gid_type[i - rdma_start_port(device)] =
5343				find_first_bit(&supported_gids, BITS_PER_LONG);
5344		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
5345	}
5346
5347	init_completion(&cma_dev->comp);
5348	refcount_set(&cma_dev->refcount, 1);
5349	INIT_LIST_HEAD(&cma_dev->id_list);
5350	ib_set_client_data(device, &cma_client, cma_dev);
5351
5352	mutex_lock(&lock);
5353	list_add_tail(&cma_dev->list, &dev_list);
5354	list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
5355		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
5356		if (ret)
5357			goto free_listen;
5358	}
5359	mutex_unlock(&lock);
5360
5361	trace_cm_add_one(device);
5362	return 0;
5363
5364free_listen:
5365	list_del(&cma_dev->list);
5366	mutex_unlock(&lock);
5367
5368	/* cma_process_remove() will delete to_destroy */
5369	cma_process_remove(cma_dev);
5370	kfree(cma_dev->default_roce_tos);
5371free_gid_type:
5372	kfree(cma_dev->default_gid_type);
5373
5374free_cma_dev:
5375	kfree(cma_dev);
5376	return ret;
5377}
5378
5379static void cma_remove_one(struct ib_device *device, void *client_data)
5380{
5381	struct cma_device *cma_dev = client_data;
5382
5383	trace_cm_remove_one(device);
5384
5385	mutex_lock(&lock);
5386	list_del(&cma_dev->list);
5387	mutex_unlock(&lock);
5388
5389	cma_process_remove(cma_dev);
5390	kfree(cma_dev->default_roce_tos);
5391	kfree(cma_dev->default_gid_type);
5392	kfree(cma_dev);
5393}
5394
5395static int cma_init_net(struct net *net)
5396{
5397	struct cma_pernet *pernet = cma_pernet(net);
5398
5399	xa_init(&pernet->tcp_ps);
5400	xa_init(&pernet->udp_ps);
5401	xa_init(&pernet->ipoib_ps);
5402	xa_init(&pernet->ib_ps);
5403
5404	return 0;
5405}
5406
5407static void cma_exit_net(struct net *net)
5408{
5409	struct cma_pernet *pernet = cma_pernet(net);
5410
5411	WARN_ON(!xa_empty(&pernet->tcp_ps));
5412	WARN_ON(!xa_empty(&pernet->udp_ps));
5413	WARN_ON(!xa_empty(&pernet->ipoib_ps));
5414	WARN_ON(!xa_empty(&pernet->ib_ps));
5415}
5416
5417static struct pernet_operations cma_pernet_operations = {
5418	.init = cma_init_net,
5419	.exit = cma_exit_net,
5420	.id = &cma_pernet_id,
5421	.size = sizeof(struct cma_pernet),
5422};
5423
5424static int __init cma_init(void)
5425{
5426	int ret;
5427
5428	/*
5429	 * There is a rare lock ordering dependency in cma_netdev_callback()
5430	 * that only happens when bonding is enabled. Teach lockdep that rtnl
5431	 * must never be nested under lock so it can report such issues without
5432	 * having to test with bonding enabled.
5433	 */
5434	if (IS_ENABLED(CONFIG_LOCKDEP)) {
5435		rtnl_lock();
5436		mutex_lock(&lock);
5437		mutex_unlock(&lock);
5438		rtnl_unlock();
5439	}
5440
5441	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
5442	if (!cma_wq)
5443		return -ENOMEM;
5444
5445	ret = register_pernet_subsys(&cma_pernet_operations);
5446	if (ret)
5447		goto err_wq;
5448
5449	ib_sa_register_client(&sa_client);
5450	register_netdevice_notifier(&cma_nb);
5451	register_netevent_notifier(&cma_netevent_cb);
5452
5453	ret = ib_register_client(&cma_client);
5454	if (ret)
5455		goto err;
5456
5457	ret = cma_configfs_init();
5458	if (ret)
5459		goto err_ib;
5460
5461	return 0;
5462
5463err_ib:
5464	ib_unregister_client(&cma_client);
5465err:
5466	unregister_netevent_notifier(&cma_netevent_cb);
5467	unregister_netdevice_notifier(&cma_nb);
5468	ib_sa_unregister_client(&sa_client);
5469	unregister_pernet_subsys(&cma_pernet_operations);
5470err_wq:
5471	destroy_workqueue(cma_wq);
5472	return ret;
5473}
5474
5475static void __exit cma_cleanup(void)
5476{
5477	cma_configfs_exit();
5478	ib_unregister_client(&cma_client);
5479	unregister_netevent_notifier(&cma_netevent_cb);
5480	unregister_netdevice_notifier(&cma_nb);
5481	ib_sa_unregister_client(&sa_client);
5482	unregister_pernet_subsys(&cma_pernet_operations);
5483	destroy_workqueue(cma_wq);
5484}
5485
5486module_init(cma_init);
5487module_exit(cma_cleanup);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
   4 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
   5 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
   6 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
   7 */
   8
   9#include <linux/completion.h>
  10#include <linux/in.h>
  11#include <linux/in6.h>
  12#include <linux/mutex.h>
  13#include <linux/random.h>
  14#include <linux/rbtree.h>
  15#include <linux/igmp.h>
  16#include <linux/xarray.h>
  17#include <linux/inetdevice.h>
  18#include <linux/slab.h>
  19#include <linux/module.h>
  20#include <net/route.h>
  21
  22#include <net/net_namespace.h>
  23#include <net/netns/generic.h>
  24#include <net/netevent.h>
  25#include <net/tcp.h>
  26#include <net/ipv6.h>
  27#include <net/ip_fib.h>
  28#include <net/ip6_route.h>
  29
  30#include <rdma/rdma_cm.h>
  31#include <rdma/rdma_cm_ib.h>
  32#include <rdma/rdma_netlink.h>
  33#include <rdma/ib.h>
  34#include <rdma/ib_cache.h>
  35#include <rdma/ib_cm.h>
  36#include <rdma/ib_sa.h>
  37#include <rdma/iw_cm.h>
  38
  39#include "core_priv.h"
  40#include "cma_priv.h"
  41#include "cma_trace.h"
  42
  43MODULE_AUTHOR("Sean Hefty");
  44MODULE_DESCRIPTION("Generic RDMA CM Agent");
  45MODULE_LICENSE("Dual BSD/GPL");
  46
  47#define CMA_CM_RESPONSE_TIMEOUT 20
  48#define CMA_MAX_CM_RETRIES 15
  49#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
  50#define CMA_IBOE_PACKET_LIFETIME 16
  51#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
  52
  53static const char * const cma_events[] = {
  54	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
  55	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
  56	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved ",
  57	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
  58	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
  59	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
  60	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
  61	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
  62	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
  63	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
  64	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
  65	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
  66	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
  67	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
  68	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
  69	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
  70};
  71
  72static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
  73			      enum ib_gid_type gid_type);
  74
  75const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
  76{
  77	size_t index = event;
  78
  79	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
  80			cma_events[index] : "unrecognized event";
  81}
  82EXPORT_SYMBOL(rdma_event_msg);
  83
  84const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
  85						int reason)
  86{
  87	if (rdma_ib_or_roce(id->device, id->port_num))
  88		return ibcm_reject_msg(reason);
  89
  90	if (rdma_protocol_iwarp(id->device, id->port_num))
  91		return iwcm_reject_msg(reason);
  92
  93	WARN_ON_ONCE(1);
  94	return "unrecognized transport";
  95}
  96EXPORT_SYMBOL(rdma_reject_msg);
  97
  98/**
  99 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 100 *                           request.
 101 * @id: Communication identifier that received the REJECT event.
 102 * @reason: Value returned in the REJECT event status field.
 103 */
 104static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
 105{
 106	if (rdma_ib_or_roce(id->device, id->port_num))
 107		return reason == IB_CM_REJ_CONSUMER_DEFINED;
 108
 109	if (rdma_protocol_iwarp(id->device, id->port_num))
 110		return reason == -ECONNREFUSED;
 111
 112	WARN_ON_ONCE(1);
 113	return false;
 114}
 115
 116const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
 117				      struct rdma_cm_event *ev, u8 *data_len)
 118{
 119	const void *p;
 120
 121	if (rdma_is_consumer_reject(id, ev->status)) {
 122		*data_len = ev->param.conn.private_data_len;
 123		p = ev->param.conn.private_data;
 124	} else {
 125		*data_len = 0;
 126		p = NULL;
 127	}
 128	return p;
 129}
 130EXPORT_SYMBOL(rdma_consumer_reject_data);
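/*
 * A minimal usage sketch (hypothetical ULP code, not part of this file): on
 * RDMA_CM_EVENT_REJECTED a consumer's event handler can combine
 * rdma_reject_msg() and rdma_consumer_reject_data() roughly as follows. The
 * handler name and the bare logging are assumptions for illustration only.
 *
 *	static int example_cm_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *event)
 *	{
 *		if (event->event == RDMA_CM_EVENT_REJECTED) {
 *			u8 len;
 *			const void *pdata =
 *				rdma_consumer_reject_data(id, event, &len);
 *
 *			pr_info("rejected: %s, %u private data bytes\n",
 *				rdma_reject_msg(id, event->status), len);
 *			...decode pdata as the ULP's wire protocol defines...
 *		}
 *		return 0;
 *	}
 */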
 131
 132/**
 133 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 134 * @id: Communication Identifier
 135 */
 136struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
 137{
 138	struct rdma_id_private *id_priv;
 139
 140	id_priv = container_of(id, struct rdma_id_private, id);
 141	if (id->device->node_type == RDMA_NODE_RNIC)
 142		return id_priv->cm_id.iw;
 143	return NULL;
 144}
 145EXPORT_SYMBOL(rdma_iw_cm_id);
 146
 147/**
 148 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 149 * @res: rdma resource tracking entry pointer
 150 */
 151struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
 152{
 153	struct rdma_id_private *id_priv =
 154		container_of(res, struct rdma_id_private, res);
 155
 156	return &id_priv->id;
 157}
 158EXPORT_SYMBOL(rdma_res_to_id);
 159
 160static int cma_add_one(struct ib_device *device);
 161static void cma_remove_one(struct ib_device *device, void *client_data);
 162
 163static struct ib_client cma_client = {
 164	.name   = "cma",
 165	.add    = cma_add_one,
 166	.remove = cma_remove_one
 167};
 168
 169static struct ib_sa_client sa_client;
 170static LIST_HEAD(dev_list);
 171static LIST_HEAD(listen_any_list);
 172static DEFINE_MUTEX(lock);
 173static struct rb_root id_table = RB_ROOT;
 174/* Serialize operations of id_table tree */
 175static DEFINE_SPINLOCK(id_table_lock);
 176static struct workqueue_struct *cma_wq;
 177static unsigned int cma_pernet_id;
 178
 179struct cma_pernet {
 180	struct xarray tcp_ps;
 181	struct xarray udp_ps;
 182	struct xarray ipoib_ps;
 183	struct xarray ib_ps;
 184};
 185
 186static struct cma_pernet *cma_pernet(struct net *net)
 187{
 188	return net_generic(net, cma_pernet_id);
 189}
 190
 191static
 192struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
 193{
 194	struct cma_pernet *pernet = cma_pernet(net);
 195
 196	switch (ps) {
 197	case RDMA_PS_TCP:
 198		return &pernet->tcp_ps;
 199	case RDMA_PS_UDP:
 200		return &pernet->udp_ps;
 201	case RDMA_PS_IPOIB:
 202		return &pernet->ipoib_ps;
 203	case RDMA_PS_IB:
 204		return &pernet->ib_ps;
 205	default:
 206		return NULL;
 207	}
 208}
 209
 210struct id_table_entry {
 211	struct list_head id_list;
 212	struct rb_node rb_node;
 213};
 214
 215struct cma_device {
 216	struct list_head	list;
 217	struct ib_device	*device;
 218	struct completion	comp;
 219	refcount_t refcount;
 220	struct list_head	id_list;
 221	enum ib_gid_type	*default_gid_type;
 222	u8			*default_roce_tos;
 223};
 224
 225struct rdma_bind_list {
 226	enum rdma_ucm_port_space ps;
 227	struct hlist_head	owners;
 228	unsigned short		port;
 229};
 230
 231static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
 232			struct rdma_bind_list *bind_list, int snum)
 233{
 234	struct xarray *xa = cma_pernet_xa(net, ps);
 235
 236	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
 237}
 238
 239static struct rdma_bind_list *cma_ps_find(struct net *net,
 240					  enum rdma_ucm_port_space ps, int snum)
 241{
 242	struct xarray *xa = cma_pernet_xa(net, ps);
 243
 244	return xa_load(xa, snum);
 245}
 246
 247static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
 248			  int snum)
 249{
 250	struct xarray *xa = cma_pernet_xa(net, ps);
 251
 252	xa_erase(xa, snum);
 253}
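/*
 * A minimal sketch of how these port-space helpers fit together (values are
 * hypothetical; the real callers are the port allocation and release paths
 * later in this file). Each port space has a per-net xarray indexed by port
 * number whose entries are rdma_bind_list structures:
 *
 *	struct rdma_bind_list *bl = kzalloc(sizeof(*bl), GFP_KERNEL);
 *
 *	bl->ps = RDMA_PS_TCP;
 *	bl->port = 4791;
 *	if (!cma_ps_alloc(net, RDMA_PS_TCP, bl, 4791))
 *		...port 4791 is now reserved in this namespace...
 *	...an incoming request is matched via cma_ps_find(net, RDMA_PS_TCP, 4791)...
 *	cma_ps_remove(net, RDMA_PS_TCP, 4791);
 */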
 254
 255enum {
 256	CMA_OPTION_AFONLY,
 257};
 258
 259void cma_dev_get(struct cma_device *cma_dev)
 260{
 261	refcount_inc(&cma_dev->refcount);
 262}
 263
 264void cma_dev_put(struct cma_device *cma_dev)
 265{
 266	if (refcount_dec_and_test(&cma_dev->refcount))
 267		complete(&cma_dev->comp);
 268}
 269
 270struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
 271					     void		*cookie)
 272{
 273	struct cma_device *cma_dev;
 274	struct cma_device *found_cma_dev = NULL;
 275
 276	mutex_lock(&lock);
 277
 278	list_for_each_entry(cma_dev, &dev_list, list)
 279		if (filter(cma_dev->device, cookie)) {
 280			found_cma_dev = cma_dev;
 281			break;
 282		}
 283
 284	if (found_cma_dev)
 285		cma_dev_get(found_cma_dev);
 286	mutex_unlock(&lock);
 287	return found_cma_dev;
 288}
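/*
 * A minimal sketch of the filter interface above (hypothetical filter
 * function). The match is made under 'lock' and the cma_device is returned
 * with an elevated refcount, so the caller must drop it with cma_dev_put()
 * when done:
 *
 *	static bool match_ibdev(struct ib_device *dev, void *cookie)
 *	{
 *		return dev == cookie;
 *	}
 *
 *	cma_dev = cma_enum_devices_by_ibdev(match_ibdev, ib_dev);
 *	if (cma_dev) {
 *		...use cma_dev...
 *		cma_dev_put(cma_dev);
 *	}
 */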
 289
 290int cma_get_default_gid_type(struct cma_device *cma_dev,
 291			     u32 port)
 292{
 293	if (!rdma_is_port_valid(cma_dev->device, port))
 294		return -EINVAL;
 295
 296	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
 297}
 298
 299int cma_set_default_gid_type(struct cma_device *cma_dev,
 300			     u32 port,
 301			     enum ib_gid_type default_gid_type)
 302{
 303	unsigned long supported_gids;
 304
 305	if (!rdma_is_port_valid(cma_dev->device, port))
 306		return -EINVAL;
 307
 308	if (default_gid_type == IB_GID_TYPE_IB &&
 309	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
 310		default_gid_type = IB_GID_TYPE_ROCE;
 311
 312	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
 313
 314	if (!(supported_gids & 1 << default_gid_type))
 315		return -EINVAL;
 316
 317	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
 318		default_gid_type;
 319
 320	return 0;
 321}
 322
 323int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
 324{
 325	if (!rdma_is_port_valid(cma_dev->device, port))
 326		return -EINVAL;
 327
 328	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
 329}
 330
 331int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
 332			     u8 default_roce_tos)
 333{
 334	if (!rdma_is_port_valid(cma_dev->device, port))
 335		return -EINVAL;
 336
 337	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
 338		 default_roce_tos;
 339
 340	return 0;
 341}
 342struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
 343{
 344	return cma_dev->device;
 345}
 346
 347/*
 348 * Device removal can occur at any time, so we need extra handling to
 349 * serialize notifying the user of device removal with other callbacks.
 350 * We do this by disabling removal notification while a callback is in
 351 * progress, and reporting it after the callback completes.
 352 */
 353
 354struct cma_multicast {
 355	struct rdma_id_private *id_priv;
 356	union {
 357		struct ib_sa_multicast *sa_mc;
 358		struct {
 359			struct work_struct work;
 360			struct rdma_cm_event event;
 361		} iboe_join;
 362	};
 363	struct list_head	list;
 364	void			*context;
 365	struct sockaddr_storage	addr;
 366	u8			join_state;
 367};
 368
 369struct cma_work {
 370	struct work_struct	work;
 371	struct rdma_id_private	*id;
 372	enum rdma_cm_state	old_state;
 373	enum rdma_cm_state	new_state;
 374	struct rdma_cm_event	event;
 375};
 376
 377union cma_ip_addr {
 378	struct in6_addr ip6;
 379	struct {
 380		__be32 pad[3];
 381		__be32 addr;
 382	} ip4;
 383};
 384
 385struct cma_hdr {
 386	u8 cma_version;
 387	u8 ip_version;	/* IP version: 7:4 */
 388	__be16 port;
 389	union cma_ip_addr src_addr;
 390	union cma_ip_addr dst_addr;
 391};
 392
 393#define CMA_VERSION 0x00
 394
 395struct cma_req_info {
 396	struct sockaddr_storage listen_addr_storage;
 397	struct sockaddr_storage src_addr_storage;
 398	struct ib_device *device;
 399	union ib_gid local_gid;
 400	__be64 service_id;
 401	int port;
 402	bool has_gid;
 403	u16 pkey;
 404};
 405
 406static int cma_comp_exch(struct rdma_id_private *id_priv,
 407			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 408{
 409	unsigned long flags;
 410	int ret;
 411
 412	/*
 413	 * The FSM uses a funny double locking where state is protected by both
 414	 * the handler_mutex and the spinlock. State is not allowed to change
 415	 * to/from a handler_mutex protected value without also holding
 416	 * handler_mutex.
 417	 */
 418	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
 419		lockdep_assert_held(&id_priv->handler_mutex);
 420
 421	spin_lock_irqsave(&id_priv->lock, flags);
 422	if ((ret = (id_priv->state == comp)))
 423		id_priv->state = exch;
 424	spin_unlock_irqrestore(&id_priv->lock, flags);
 425	return ret;
 426}
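/*
 * Worked example of the helper above: cma_comp_exch(id_priv, RDMA_CM_IDLE,
 * RDMA_CM_ADDR_BOUND) atomically moves the id to RDMA_CM_ADDR_BOUND and
 * returns non-zero only if the state was still RDMA_CM_IDLE; otherwise the
 * state is left untouched and 0 is returned, which is how callers detect
 * that they lost a state race.
 */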
 427
 428static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
 429{
 430	return hdr->ip_version >> 4;
 431}
 432
 433static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
 434{
 435	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
 436}
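/*
 * Worked example of the packing above (illustrative values): for an IPv4
 * request, cma_set_ip_ver(hdr, 4) leaves hdr->ip_version == 0x40 and the
 * receiver's cma_get_ip_ver(hdr) yields 4; an IPv6 request uses 0x60 / 6.
 * The low nibble is left untouched by cma_set_ip_ver().
 */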
 437
 438static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
 439{
 440	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
 441}
 442
 443static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
 444{
 445	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
 446}
 447
 448static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
 449{
 450	struct in_device *in_dev = NULL;
 451
 452	if (ndev) {
 453		rtnl_lock();
 454		in_dev = __in_dev_get_rtnl(ndev);
 455		if (in_dev) {
 456			if (join)
 457				ip_mc_inc_group(in_dev,
 458						*(__be32 *)(mgid->raw + 12));
 459			else
 460				ip_mc_dec_group(in_dev,
 461						*(__be32 *)(mgid->raw + 12));
 462		}
 463		rtnl_unlock();
 464	}
 465	return (in_dev) ? 0 : -ENODEV;
 466}
 467
 468static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
 469				 struct id_table_entry *entry_b)
 470{
 471	struct rdma_id_private *id_priv = list_first_entry(
 472		&entry_b->id_list, struct rdma_id_private, id_list_entry);
 473	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
 474	struct sockaddr *sb = cma_dst_addr(id_priv);
 475
 476	if (ifindex_a != ifindex_b)
 477		return (ifindex_a > ifindex_b) ? 1 : -1;
 478
 479	if (sa->sa_family != sb->sa_family)
 480		return sa->sa_family - sb->sa_family;
 481
 482	if (sa->sa_family == AF_INET &&
 483	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
 484		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
 485			      &((struct sockaddr_in *)sb)->sin_addr,
 486			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
 487	}
 488
 489	if (sa->sa_family == AF_INET6 &&
 490	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
 491		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
 492				     &((struct sockaddr_in6 *)sb)->sin6_addr);
 493	}
 494
 495	return -1;
 496}
 497
 498static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
 499{
 500	struct rb_node **new, *parent = NULL;
 501	struct id_table_entry *this, *node;
 502	unsigned long flags;
 503	int result;
 504
 505	node = kzalloc(sizeof(*node), GFP_KERNEL);
 506	if (!node)
 507		return -ENOMEM;
 508
 509	spin_lock_irqsave(&id_table_lock, flags);
 510	new = &id_table.rb_node;
 511	while (*new) {
 512		this = container_of(*new, struct id_table_entry, rb_node);
 513		result = compare_netdev_and_ip(
 514			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
 515			cma_dst_addr(node_id_priv), this);
 516
 517		parent = *new;
 518		if (result < 0)
 519			new = &((*new)->rb_left);
 520		else if (result > 0)
 521			new = &((*new)->rb_right);
 522		else {
 523			list_add_tail(&node_id_priv->id_list_entry,
 524				      &this->id_list);
 525			kfree(node);
 526			goto unlock;
 527		}
 528	}
 529
 530	INIT_LIST_HEAD(&node->id_list);
 531	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
 532
 533	rb_link_node(&node->rb_node, parent, new);
 534	rb_insert_color(&node->rb_node, &id_table);
 535
 536unlock:
 537	spin_unlock_irqrestore(&id_table_lock, flags);
 538	return 0;
 539}
 540
 541static struct id_table_entry *
 542node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
 543{
 544	struct rb_node *node = root->rb_node;
 545	struct id_table_entry *data;
 546	int result;
 547
 548	while (node) {
 549		data = container_of(node, struct id_table_entry, rb_node);
 550		result = compare_netdev_and_ip(ifindex, sa, data);
 551		if (result < 0)
 552			node = node->rb_left;
 553		else if (result > 0)
 554			node = node->rb_right;
 555		else
 556			return data;
 557	}
 558
 559	return NULL;
 560}
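/*
 * A small lookup sketch for the tree above (hypothetical values): cm_ids are
 * keyed by (bound ifindex, destination address), and ids sharing a key are
 * chained on one id_table_entry instead of being inserted twice. Lookups
 * must hold id_table_lock, as cma_remove_id_from_tree() below does:
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = cpu_to_be32(0x0a000005),	(10.0.0.5)
 *	};
 *	struct id_table_entry *ent;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&id_table_lock, flags);
 *	ent = node_from_ndev_ip(&id_table, 3, (struct sockaddr *)&dst);
 *	spin_unlock_irqrestore(&id_table_lock, flags);
 */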
 561
 562static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
 563{
 564	struct id_table_entry *data;
 565	unsigned long flags;
 566
 567	spin_lock_irqsave(&id_table_lock, flags);
 568	if (list_empty(&id_priv->id_list_entry))
 569		goto out;
 570
 571	data = node_from_ndev_ip(&id_table,
 572				 id_priv->id.route.addr.dev_addr.bound_dev_if,
 573				 cma_dst_addr(id_priv));
 574	if (!data)
 575		goto out;
 576
 577	list_del_init(&id_priv->id_list_entry);
 578	if (list_empty(&data->id_list)) {
 579		rb_erase(&data->rb_node, &id_table);
 580		kfree(data);
 581	}
 582out:
 583	spin_unlock_irqrestore(&id_table_lock, flags);
 584}
 585
 586static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
 587			       struct cma_device *cma_dev)
 588{
 589	cma_dev_get(cma_dev);
 590	id_priv->cma_dev = cma_dev;
 591	id_priv->id.device = cma_dev->device;
 592	id_priv->id.route.addr.dev_addr.transport =
 593		rdma_node_get_transport(cma_dev->device->node_type);
 594	list_add_tail(&id_priv->device_item, &cma_dev->id_list);
 595
 596	trace_cm_id_attach(id_priv, cma_dev->device);
 597}
 598
 599static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 600			      struct cma_device *cma_dev)
 601{
 602	_cma_attach_to_dev(id_priv, cma_dev);
 603	id_priv->gid_type =
 604		cma_dev->default_gid_type[id_priv->id.port_num -
 605					  rdma_start_port(cma_dev->device)];
 606}
 607
 608static void cma_release_dev(struct rdma_id_private *id_priv)
 609{
 610	mutex_lock(&lock);
 611	list_del_init(&id_priv->device_item);
 612	cma_dev_put(id_priv->cma_dev);
 613	id_priv->cma_dev = NULL;
 614	id_priv->id.device = NULL;
 615	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
 616		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
 617		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
 618	}
 619	mutex_unlock(&lock);
 620}
 621
 622static inline unsigned short cma_family(struct rdma_id_private *id_priv)
 623{
 624	return id_priv->id.route.addr.src_addr.ss_family;
 625}
 626
 627static int cma_set_default_qkey(struct rdma_id_private *id_priv)
 628{
 629	struct ib_sa_mcmember_rec rec;
 630	int ret = 0;
 631
 632	switch (id_priv->id.ps) {
 633	case RDMA_PS_UDP:
 634	case RDMA_PS_IB:
 635		id_priv->qkey = RDMA_UDP_QKEY;
 636		break;
 637	case RDMA_PS_IPOIB:
 638		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
 639		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
 640					     id_priv->id.port_num, &rec.mgid,
 641					     &rec);
 642		if (!ret)
 643			id_priv->qkey = be32_to_cpu(rec.qkey);
 644		break;
 645	default:
 646		break;
 647	}
 648	return ret;
 649}
 650
 651static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
 652{
 653	if (!qkey ||
 654	    (id_priv->qkey && (id_priv->qkey != qkey)))
 655		return -EINVAL;
 656
 657	id_priv->qkey = qkey;
 658	return 0;
 659}
 660
 661static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
 662{
 663	dev_addr->dev_type = ARPHRD_INFINIBAND;
 664	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
 665	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
 666}
 667
 668static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 669{
 670	int ret;
 671
 672	if (addr->sa_family != AF_IB) {
 673		ret = rdma_translate_ip(addr, dev_addr);
 674	} else {
 675		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
 676		ret = 0;
 677	}
 678
 679	return ret;
 680}
 681
 682static const struct ib_gid_attr *
 683cma_validate_port(struct ib_device *device, u32 port,
 684		  enum ib_gid_type gid_type,
 685		  union ib_gid *gid,
 686		  struct rdma_id_private *id_priv)
 687{
 688	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 689	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
 690	int bound_if_index = dev_addr->bound_dev_if;
 691	int dev_type = dev_addr->dev_type;
 692	struct net_device *ndev = NULL;
 693	struct net_device *pdev = NULL;
 694
 695	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
 696		goto out;
 697
 698	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
 699		goto out;
 700
 701	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
 702		goto out;
 703
 704	/*
 705	 * For drivers that do not associate more than one net device with
 706	 * their gid tables, such as iWARP drivers, it is sufficient to
 707	 * return the first table entry.
 708	 *
 709	 * Other driver classes might be included in the future.
 710	 */
 711	if (rdma_protocol_iwarp(device, port)) {
 712		sgid_attr = rdma_get_gid_attr(device, port, 0);
 713		if (IS_ERR(sgid_attr))
 714			goto out;
 715
 716		rcu_read_lock();
 717		ndev = rcu_dereference(sgid_attr->ndev);
 718		if (ndev->ifindex != bound_if_index) {
 719			pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
 720			if (pdev) {
 721				if (is_vlan_dev(pdev)) {
 722					pdev = vlan_dev_real_dev(pdev);
 723					if (ndev->ifindex == pdev->ifindex)
 724						bound_if_index = pdev->ifindex;
 725				}
 726				if (is_vlan_dev(ndev)) {
 727					pdev = vlan_dev_real_dev(ndev);
 728					if (bound_if_index == pdev->ifindex)
 729						bound_if_index = ndev->ifindex;
 730				}
 731			}
 732		}
 733		if (!net_eq(dev_net(ndev), dev_addr->net) ||
 734		    ndev->ifindex != bound_if_index) {
 735			rdma_put_gid_attr(sgid_attr);
 736			sgid_attr = ERR_PTR(-ENODEV);
 737		}
 738		rcu_read_unlock();
 739		goto out;
 740	}
 741
 742	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
 743		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
 744		if (!ndev)
 745			goto out;
 746	} else {
 747		gid_type = IB_GID_TYPE_IB;
 748	}
 749
 750	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
 751	dev_put(ndev);
 752out:
 753	return sgid_attr;
 754}
 755
 756static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
 757			       const struct ib_gid_attr *sgid_attr)
 758{
 759	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
 760	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
 761}
 762
 763/**
 764 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 765 * based on source ip address.
 766 * @id_priv:	cm_id which should be bound to cma device
 767 *
 768 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 769 * based on source IP address. It returns 0 on success or an error code otherwise.
 770 * It is applicable to both active and passive side cm_ids.
 771 */
 772static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
 773{
 774	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 775	const struct ib_gid_attr *sgid_attr;
 776	union ib_gid gid, iboe_gid, *gidp;
 777	struct cma_device *cma_dev;
 778	enum ib_gid_type gid_type;
 779	int ret = -ENODEV;
 780	u32 port;
 781
 782	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 783	    id_priv->id.ps == RDMA_PS_IPOIB)
 784		return -EINVAL;
 785
 786	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
 787		    &iboe_gid);
 788
 789	memcpy(&gid, dev_addr->src_dev_addr +
 790	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 791
 792	mutex_lock(&lock);
 793	list_for_each_entry(cma_dev, &dev_list, list) {
 794		rdma_for_each_port (cma_dev->device, port) {
 795			gidp = rdma_protocol_roce(cma_dev->device, port) ?
 796			       &iboe_gid : &gid;
 797			gid_type = cma_dev->default_gid_type[port - 1];
 798			sgid_attr = cma_validate_port(cma_dev->device, port,
 799						      gid_type, gidp, id_priv);
 800			if (!IS_ERR(sgid_attr)) {
 801				id_priv->id.port_num = port;
 802				cma_bind_sgid_attr(id_priv, sgid_attr);
 803				cma_attach_to_dev(id_priv, cma_dev);
 804				ret = 0;
 805				goto out;
 806			}
 807		}
 808	}
 809out:
 810	mutex_unlock(&lock);
 811	return ret;
 812}
 813
 814/**
 815 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 816 * @id_priv:		cm id to bind to cma device
 817 * @listen_id_priv:	listener cm id to match against
 818 * @req:		Pointer to req structure containing incoming
 819 *			request information
 820 * cma_ib_acquire_dev() acquires the cma device, port and SGID attribute when
 821 * the rdma device of listen_id matches the incoming request. It also verifies
 822 * that a GID table entry is present for the source address.
 823 * Returns 0 on success, or an error code otherwise.
 824 */
 825static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
 826			      const struct rdma_id_private *listen_id_priv,
 827			      struct cma_req_info *req)
 828{
 829	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 830	const struct ib_gid_attr *sgid_attr;
 831	enum ib_gid_type gid_type;
 832	union ib_gid gid;
 833
 834	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 835	    id_priv->id.ps == RDMA_PS_IPOIB)
 836		return -EINVAL;
 837
 838	if (rdma_protocol_roce(req->device, req->port))
 839		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
 840			    &gid);
 841	else
 842		memcpy(&gid, dev_addr->src_dev_addr +
 843		       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 844
 845	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
 846	sgid_attr = cma_validate_port(req->device, req->port,
 847				      gid_type, &gid, id_priv);
 848	if (IS_ERR(sgid_attr))
 849		return PTR_ERR(sgid_attr);
 850
 851	id_priv->id.port_num = req->port;
 852	cma_bind_sgid_attr(id_priv, sgid_attr);
 853	/* Need to acquire lock to protect against readers
 854	 * of cma_dev->id_list, such as cma_netdev_callback() and
 855	 * cma_process_remove().
 856	 */
 857	mutex_lock(&lock);
 858	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
 859	mutex_unlock(&lock);
 860	rdma_restrack_add(&id_priv->res);
 861	return 0;
 862}
 863
 864static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
 865			      const struct rdma_id_private *listen_id_priv)
 866{
 867	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 868	const struct ib_gid_attr *sgid_attr;
 869	struct cma_device *cma_dev;
 870	enum ib_gid_type gid_type;
 871	int ret = -ENODEV;
 872	union ib_gid gid;
 873	u32 port;
 874
 875	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 876	    id_priv->id.ps == RDMA_PS_IPOIB)
 877		return -EINVAL;
 878
 879	memcpy(&gid, dev_addr->src_dev_addr +
 880	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 881
 882	mutex_lock(&lock);
 883
 884	cma_dev = listen_id_priv->cma_dev;
 885	port = listen_id_priv->id.port_num;
 886	gid_type = listen_id_priv->gid_type;
 887	sgid_attr = cma_validate_port(cma_dev->device, port,
 888				      gid_type, &gid, id_priv);
 889	if (!IS_ERR(sgid_attr)) {
 890		id_priv->id.port_num = port;
 891		cma_bind_sgid_attr(id_priv, sgid_attr);
 892		ret = 0;
 893		goto out;
 894	}
 895
 896	list_for_each_entry(cma_dev, &dev_list, list) {
 897		rdma_for_each_port (cma_dev->device, port) {
 898			if (listen_id_priv->cma_dev == cma_dev &&
 899			    listen_id_priv->id.port_num == port)
 900				continue;
 901
 902			gid_type = cma_dev->default_gid_type[port - 1];
 903			sgid_attr = cma_validate_port(cma_dev->device, port,
 904						      gid_type, &gid, id_priv);
 905			if (!IS_ERR(sgid_attr)) {
 906				id_priv->id.port_num = port;
 907				cma_bind_sgid_attr(id_priv, sgid_attr);
 908				ret = 0;
 909				goto out;
 910			}
 911		}
 912	}
 913
 914out:
 915	if (!ret) {
 916		cma_attach_to_dev(id_priv, cma_dev);
 917		rdma_restrack_add(&id_priv->res);
 918	}
 919
 920	mutex_unlock(&lock);
 921	return ret;
 922}
 923
 924/*
 925 * Select the source IB device and address to reach the destination IB address.
 926 */
 927static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 928{
 929	struct cma_device *cma_dev, *cur_dev;
 930	struct sockaddr_ib *addr;
 931	union ib_gid gid, sgid, *dgid;
 932	unsigned int p;
 933	u16 pkey, index;
 934	enum ib_port_state port_state;
 935	int ret;
 936	int i;
 937
 938	cma_dev = NULL;
 939	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
 940	dgid = (union ib_gid *) &addr->sib_addr;
 941	pkey = ntohs(addr->sib_pkey);
 942
 943	mutex_lock(&lock);
 944	list_for_each_entry(cur_dev, &dev_list, list) {
 945		rdma_for_each_port (cur_dev->device, p) {
 946			if (!rdma_cap_af_ib(cur_dev->device, p))
 947				continue;
 948
 949			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
 950				continue;
 951
 952			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
 953				continue;
 954
 955			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
 956			     ++i) {
 957				ret = rdma_query_gid(cur_dev->device, p, i,
 958						     &gid);
 959				if (ret)
 960					continue;
 961
 962				if (!memcmp(&gid, dgid, sizeof(gid))) {
 963					cma_dev = cur_dev;
 964					sgid = gid;
 965					id_priv->id.port_num = p;
 966					goto found;
 967				}
 968
 969				if (!cma_dev && (gid.global.subnet_prefix ==
 970				    dgid->global.subnet_prefix) &&
 971				    port_state == IB_PORT_ACTIVE) {
 972					cma_dev = cur_dev;
 973					sgid = gid;
 974					id_priv->id.port_num = p;
 975					goto found;
 976				}
 977			}
 978		}
 979	}
 980	mutex_unlock(&lock);
 981	return -ENODEV;
 982
 983found:
 984	cma_attach_to_dev(id_priv, cma_dev);
 985	rdma_restrack_add(&id_priv->res);
 986	mutex_unlock(&lock);
 987	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
 988	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
 989	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
 990	return 0;
 991}
 992
 993static void cma_id_get(struct rdma_id_private *id_priv)
 994{
 995	refcount_inc(&id_priv->refcount);
 996}
 997
 998static void cma_id_put(struct rdma_id_private *id_priv)
 999{
1000	if (refcount_dec_and_test(&id_priv->refcount))
1001		complete(&id_priv->comp);
1002}
1003
1004static struct rdma_id_private *
1005__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
1006		 void *context, enum rdma_ucm_port_space ps,
1007		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
1008{
1009	struct rdma_id_private *id_priv;
1010
1011	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
1012	if (!id_priv)
1013		return ERR_PTR(-ENOMEM);
1014
1015	id_priv->state = RDMA_CM_IDLE;
1016	id_priv->id.context = context;
1017	id_priv->id.event_handler = event_handler;
1018	id_priv->id.ps = ps;
1019	id_priv->id.qp_type = qp_type;
1020	id_priv->tos_set = false;
1021	id_priv->timeout_set = false;
1022	id_priv->min_rnr_timer_set = false;
1023	id_priv->gid_type = IB_GID_TYPE_IB;
1024	spin_lock_init(&id_priv->lock);
1025	mutex_init(&id_priv->qp_mutex);
1026	init_completion(&id_priv->comp);
1027	refcount_set(&id_priv->refcount, 1);
1028	mutex_init(&id_priv->handler_mutex);
1029	INIT_LIST_HEAD(&id_priv->device_item);
1030	INIT_LIST_HEAD(&id_priv->id_list_entry);
1031	INIT_LIST_HEAD(&id_priv->listen_list);
1032	INIT_LIST_HEAD(&id_priv->mc_list);
1033	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
1034	id_priv->id.route.addr.dev_addr.net = get_net(net);
1035	id_priv->seq_num &= 0x00ffffff;
1036
1037	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
1038	if (parent)
1039		rdma_restrack_parent_name(&id_priv->res, &parent->res);
1040
1041	return id_priv;
1042}
1043
1044struct rdma_cm_id *
1045__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
1046			void *context, enum rdma_ucm_port_space ps,
1047			enum ib_qp_type qp_type, const char *caller)
1048{
1049	struct rdma_id_private *ret;
1050
1051	ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
1052	if (IS_ERR(ret))
1053		return ERR_CAST(ret);
1054
1055	rdma_restrack_set_name(&ret->res, caller);
1056	return &ret->id;
1057}
1058EXPORT_SYMBOL(__rdma_create_kernel_id);
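/*
 * A minimal sketch of kernel-side usage (hypothetical handler, error
 * handling trimmed): in-kernel ULPs do not call __rdma_create_kernel_id()
 * directly but go through the rdma_create_id() wrapper macro in
 * <rdma/rdma_cm.h>, which supplies KBUILD_MODNAME as @caller:
 *
 *	struct rdma_cm_id *cm_id;
 *
 *	cm_id = rdma_create_id(&init_net, example_cm_handler, NULL,
 *			       RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 */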
1059
1060struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
1061				       void *context,
1062				       enum rdma_ucm_port_space ps,
1063				       enum ib_qp_type qp_type)
1064{
1065	struct rdma_id_private *ret;
1066
1067	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
1068			       ps, qp_type, NULL);
1069	if (IS_ERR(ret))
1070		return ERR_CAST(ret);
1071
1072	rdma_restrack_set_name(&ret->res, NULL);
1073	return &ret->id;
1074}
1075EXPORT_SYMBOL(rdma_create_user_id);
1076
1077static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1078{
1079	struct ib_qp_attr qp_attr;
1080	int qp_attr_mask, ret;
1081
1082	qp_attr.qp_state = IB_QPS_INIT;
1083	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1084	if (ret)
1085		return ret;
1086
1087	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1088	if (ret)
1089		return ret;
1090
1091	qp_attr.qp_state = IB_QPS_RTR;
1092	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
1093	if (ret)
1094		return ret;
1095
1096	qp_attr.qp_state = IB_QPS_RTS;
1097	qp_attr.sq_psn = 0;
1098	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
1099
1100	return ret;
1101}
1102
1103static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1104{
1105	struct ib_qp_attr qp_attr;
1106	int qp_attr_mask, ret;
1107
1108	qp_attr.qp_state = IB_QPS_INIT;
1109	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1110	if (ret)
1111		return ret;
1112
1113	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1114}
1115
1116int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
1117		   struct ib_qp_init_attr *qp_init_attr)
1118{
1119	struct rdma_id_private *id_priv;
1120	struct ib_qp *qp;
1121	int ret;
1122
1123	id_priv = container_of(id, struct rdma_id_private, id);
1124	if (id->device != pd->device) {
1125		ret = -EINVAL;
1126		goto out_err;
1127	}
1128
1129	qp_init_attr->port_num = id->port_num;
1130	qp = ib_create_qp(pd, qp_init_attr);
1131	if (IS_ERR(qp)) {
1132		ret = PTR_ERR(qp);
1133		goto out_err;
1134	}
1135
1136	if (id->qp_type == IB_QPT_UD)
1137		ret = cma_init_ud_qp(id_priv, qp);
1138	else
1139		ret = cma_init_conn_qp(id_priv, qp);
1140	if (ret)
1141		goto out_destroy;
1142
1143	id->qp = qp;
1144	id_priv->qp_num = qp->qp_num;
1145	id_priv->srq = (qp->srq != NULL);
1146	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
1147	return 0;
1148out_destroy:
1149	ib_destroy_qp(qp);
1150out_err:
1151	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
1152	return ret;
1153}
1154EXPORT_SYMBOL(rdma_create_qp);
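/*
 * A minimal consumer sketch (hypothetical CQ/PD variables and sizes, error
 * handling trimmed): after RDMA_CM_EVENT_ROUTE_RESOLVED a ULP would
 * typically create its QP on the cm_id before calling rdma_connect():
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type = IB_QPT_RC,
 *		.send_cq = my_cq,
 *		.recv_cq = my_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	struct ib_pd *pd = ib_alloc_pd(cm_id->device, 0);
 *
 *	ret = rdma_create_qp(cm_id, pd, &attr);
 */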
1155
1156void rdma_destroy_qp(struct rdma_cm_id *id)
1157{
1158	struct rdma_id_private *id_priv;
1159
1160	id_priv = container_of(id, struct rdma_id_private, id);
1161	trace_cm_qp_destroy(id_priv);
1162	mutex_lock(&id_priv->qp_mutex);
1163	ib_destroy_qp(id_priv->id.qp);
1164	id_priv->id.qp = NULL;
1165	mutex_unlock(&id_priv->qp_mutex);
1166}
1167EXPORT_SYMBOL(rdma_destroy_qp);
1168
1169static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
1170			     struct rdma_conn_param *conn_param)
1171{
1172	struct ib_qp_attr qp_attr;
1173	int qp_attr_mask, ret;
1174
1175	mutex_lock(&id_priv->qp_mutex);
1176	if (!id_priv->id.qp) {
1177		ret = 0;
1178		goto out;
1179	}
1180
1181	/* Need to update QP attributes from default values. */
1182	qp_attr.qp_state = IB_QPS_INIT;
1183	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1184	if (ret)
1185		goto out;
1186
1187	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1188	if (ret)
1189		goto out;
1190
1191	qp_attr.qp_state = IB_QPS_RTR;
1192	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1193	if (ret)
1194		goto out;
1195
1196	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1197
1198	if (conn_param)
1199		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
1200	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1201out:
1202	mutex_unlock(&id_priv->qp_mutex);
1203	return ret;
1204}
1205
1206static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
1207			     struct rdma_conn_param *conn_param)
1208{
1209	struct ib_qp_attr qp_attr;
1210	int qp_attr_mask, ret;
1211
1212	mutex_lock(&id_priv->qp_mutex);
1213	if (!id_priv->id.qp) {
1214		ret = 0;
1215		goto out;
1216	}
1217
1218	qp_attr.qp_state = IB_QPS_RTS;
1219	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1220	if (ret)
1221		goto out;
1222
1223	if (conn_param)
1224		qp_attr.max_rd_atomic = conn_param->initiator_depth;
1225	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1226out:
1227	mutex_unlock(&id_priv->qp_mutex);
1228	return ret;
1229}
1230
1231static int cma_modify_qp_err(struct rdma_id_private *id_priv)
1232{
1233	struct ib_qp_attr qp_attr;
1234	int ret;
1235
1236	mutex_lock(&id_priv->qp_mutex);
1237	if (!id_priv->id.qp) {
1238		ret = 0;
1239		goto out;
1240	}
1241
1242	qp_attr.qp_state = IB_QPS_ERR;
1243	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
1244out:
1245	mutex_unlock(&id_priv->qp_mutex);
1246	return ret;
1247}
1248
1249static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
1250			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1251{
1252	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
1253	int ret;
1254	u16 pkey;
1255
1256	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
1257		pkey = 0xffff;
1258	else
1259		pkey = ib_addr_get_pkey(dev_addr);
1260
1261	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
1262				  pkey, &qp_attr->pkey_index);
1263	if (ret)
1264		return ret;
1265
1266	qp_attr->port_num = id_priv->id.port_num;
1267	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
1268
1269	if (id_priv->id.qp_type == IB_QPT_UD) {
1270		ret = cma_set_default_qkey(id_priv);
1271		if (ret)
1272			return ret;
1273
1274		qp_attr->qkey = id_priv->qkey;
1275		*qp_attr_mask |= IB_QP_QKEY;
1276	} else {
1277		qp_attr->qp_access_flags = 0;
1278		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
1279	}
1280	return 0;
1281}
1282
1283int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
1284		       int *qp_attr_mask)
1285{
1286	struct rdma_id_private *id_priv;
1287	int ret = 0;
1288
1289	id_priv = container_of(id, struct rdma_id_private, id);
1290	if (rdma_cap_ib_cm(id->device, id->port_num)) {
1291		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
1292			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
1293		else
1294			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
1295						 qp_attr_mask);
1296
1297		if (qp_attr->qp_state == IB_QPS_RTR)
1298			qp_attr->rq_psn = id_priv->seq_num;
1299	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1300		if (!id_priv->cm_id.iw) {
1301			qp_attr->qp_access_flags = 0;
1302			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1303		} else
1304			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
1305						 qp_attr_mask);
1306		qp_attr->port_num = id_priv->id.port_num;
1307		*qp_attr_mask |= IB_QP_PORT;
1308	} else {
1309		ret = -ENOSYS;
1310	}
1311
1312	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
1313		qp_attr->timeout = id_priv->timeout;
1314
1315	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
1316		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
1317
1318	return ret;
1319}
1320EXPORT_SYMBOL(rdma_init_qp_attr);
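/*
 * A minimal sketch (hypothetical QP variable, error handling trimmed) of how
 * a ULP that drives its own QP transitions, instead of using
 * rdma_create_qp(), would move a QP to RTR: set the target state, let
 * rdma_init_qp_attr() fill in the CM-derived attributes and mask, then apply
 * them with ib_modify_qp():
 *
 *	struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_RTR };
 *	int qp_attr_mask, ret;
 *
 *	ret = rdma_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(my_qp, &qp_attr, qp_attr_mask);
 */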
1321
1322static inline bool cma_zero_addr(const struct sockaddr *addr)
1323{
1324	switch (addr->sa_family) {
1325	case AF_INET:
1326		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
1327	case AF_INET6:
1328		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
1329	case AF_IB:
1330		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
1331	default:
1332		return false;
1333	}
1334}
1335
1336static inline bool cma_loopback_addr(const struct sockaddr *addr)
1337{
1338	switch (addr->sa_family) {
1339	case AF_INET:
1340		return ipv4_is_loopback(
1341			((struct sockaddr_in *)addr)->sin_addr.s_addr);
1342	case AF_INET6:
1343		return ipv6_addr_loopback(
1344			&((struct sockaddr_in6 *)addr)->sin6_addr);
1345	case AF_IB:
1346		return ib_addr_loopback(
1347			&((struct sockaddr_ib *)addr)->sib_addr);
1348	default:
1349		return false;
1350	}
1351}
1352
1353static inline bool cma_any_addr(const struct sockaddr *addr)
1354{
1355	return cma_zero_addr(addr) || cma_loopback_addr(addr);
1356}
1357
1358static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
1359{
1360	if (src->sa_family != dst->sa_family)
1361		return -1;
1362
1363	switch (src->sa_family) {
1364	case AF_INET:
1365		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
1366		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
1367	case AF_INET6: {
1368		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
1369		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
1370		bool link_local;
1371
1372		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
1373					  &dst_addr6->sin6_addr))
1374			return 1;
1375		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
1376			     IPV6_ADDR_LINKLOCAL;
1377		/* Link-local addresses must also match on scope_id */
1378		return link_local ? (src_addr6->sin6_scope_id !=
1379				     dst_addr6->sin6_scope_id) :
1380				    0;
1381	}
1382
1383	default:
1384		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
1385				   &((struct sockaddr_ib *) dst)->sib_addr);
1386	}
1387}
1388
1389static __be16 cma_port(const struct sockaddr *addr)
1390{
1391	struct sockaddr_ib *sib;
1392
1393	switch (addr->sa_family) {
1394	case AF_INET:
1395		return ((struct sockaddr_in *) addr)->sin_port;
1396	case AF_INET6:
1397		return ((struct sockaddr_in6 *) addr)->sin6_port;
1398	case AF_IB:
1399		sib = (struct sockaddr_ib *) addr;
1400		return htons((u16) (be64_to_cpu(sib->sib_sid) &
1401				    be64_to_cpu(sib->sib_sid_mask)));
1402	default:
1403		return 0;
1404	}
1405}
1406
1407static inline int cma_any_port(const struct sockaddr *addr)
1408{
1409	return !cma_port(addr);
1410}
1411
1412static void cma_save_ib_info(struct sockaddr *src_addr,
1413			     struct sockaddr *dst_addr,
1414			     const struct rdma_cm_id *listen_id,
1415			     const struct sa_path_rec *path)
1416{
1417	struct sockaddr_ib *listen_ib, *ib;
1418
1419	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
1420	if (src_addr) {
1421		ib = (struct sockaddr_ib *)src_addr;
1422		ib->sib_family = AF_IB;
1423		if (path) {
1424			ib->sib_pkey = path->pkey;
1425			ib->sib_flowinfo = path->flow_label;
1426			memcpy(&ib->sib_addr, &path->sgid, 16);
1427			ib->sib_sid = path->service_id;
1428			ib->sib_scope_id = 0;
1429		} else {
1430			ib->sib_pkey = listen_ib->sib_pkey;
1431			ib->sib_flowinfo = listen_ib->sib_flowinfo;
1432			ib->sib_addr = listen_ib->sib_addr;
1433			ib->sib_sid = listen_ib->sib_sid;
1434			ib->sib_scope_id = listen_ib->sib_scope_id;
1435		}
1436		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
1437	}
1438	if (dst_addr) {
1439		ib = (struct sockaddr_ib *)dst_addr;
1440		ib->sib_family = AF_IB;
1441		if (path) {
1442			ib->sib_pkey = path->pkey;
1443			ib->sib_flowinfo = path->flow_label;
1444			memcpy(&ib->sib_addr, &path->dgid, 16);
1445		}
1446	}
1447}
1448
1449static void cma_save_ip4_info(struct sockaddr_in *src_addr,
1450			      struct sockaddr_in *dst_addr,
1451			      struct cma_hdr *hdr,
1452			      __be16 local_port)
1453{
1454	if (src_addr) {
1455		*src_addr = (struct sockaddr_in) {
1456			.sin_family = AF_INET,
1457			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
1458			.sin_port = local_port,
1459		};
1460	}
1461
1462	if (dst_addr) {
1463		*dst_addr = (struct sockaddr_in) {
1464			.sin_family = AF_INET,
1465			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
1466			.sin_port = hdr->port,
1467		};
1468	}
1469}
1470
1471static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
1472			      struct sockaddr_in6 *dst_addr,
1473			      struct cma_hdr *hdr,
1474			      __be16 local_port)
1475{
1476	if (src_addr) {
1477		*src_addr = (struct sockaddr_in6) {
1478			.sin6_family = AF_INET6,
1479			.sin6_addr = hdr->dst_addr.ip6,
1480			.sin6_port = local_port,
1481		};
1482	}
1483
1484	if (dst_addr) {
1485		*dst_addr = (struct sockaddr_in6) {
1486			.sin6_family = AF_INET6,
1487			.sin6_addr = hdr->src_addr.ip6,
1488			.sin6_port = hdr->port,
1489		};
1490	}
1491}
1492
1493static u16 cma_port_from_service_id(__be64 service_id)
1494{
1495	return (u16)be64_to_cpu(service_id);
1496}
1497
1498static int cma_save_ip_info(struct sockaddr *src_addr,
1499			    struct sockaddr *dst_addr,
1500			    const struct ib_cm_event *ib_event,
1501			    __be64 service_id)
1502{
1503	struct cma_hdr *hdr;
1504	__be16 port;
1505
1506	hdr = ib_event->private_data;
1507	if (hdr->cma_version != CMA_VERSION)
1508		return -EINVAL;
1509
1510	port = htons(cma_port_from_service_id(service_id));
1511
1512	switch (cma_get_ip_ver(hdr)) {
1513	case 4:
1514		cma_save_ip4_info((struct sockaddr_in *)src_addr,
1515				  (struct sockaddr_in *)dst_addr, hdr, port);
1516		break;
1517	case 6:
1518		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
1519				  (struct sockaddr_in6 *)dst_addr, hdr, port);
1520		break;
1521	default:
1522		return -EAFNOSUPPORT;
1523	}
1524
1525	return 0;
1526}
1527
1528static int cma_save_net_info(struct sockaddr *src_addr,
1529			     struct sockaddr *dst_addr,
1530			     const struct rdma_cm_id *listen_id,
1531			     const struct ib_cm_event *ib_event,
1532			     sa_family_t sa_family, __be64 service_id)
1533{
1534	if (sa_family == AF_IB) {
1535		if (ib_event->event == IB_CM_REQ_RECEIVED)
1536			cma_save_ib_info(src_addr, dst_addr, listen_id,
1537					 ib_event->param.req_rcvd.primary_path);
1538		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1539			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
1540		return 0;
1541	}
1542
1543	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
1544}
1545
1546static int cma_save_req_info(const struct ib_cm_event *ib_event,
1547			     struct cma_req_info *req)
1548{
1549	const struct ib_cm_req_event_param *req_param =
1550		&ib_event->param.req_rcvd;
1551	const struct ib_cm_sidr_req_event_param *sidr_param =
1552		&ib_event->param.sidr_req_rcvd;
1553
1554	switch (ib_event->event) {
1555	case IB_CM_REQ_RECEIVED:
1556		req->device	= req_param->listen_id->device;
1557		req->port	= req_param->port;
1558		memcpy(&req->local_gid, &req_param->primary_path->sgid,
1559		       sizeof(req->local_gid));
1560		req->has_gid	= true;
1561		req->service_id = req_param->primary_path->service_id;
1562		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
1563		if (req->pkey != req_param->bth_pkey)
1564			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
1565					    "RDMA CMA: in the future this may cause the request to be dropped\n",
1566					    req_param->bth_pkey, req->pkey);
1567		break;
1568	case IB_CM_SIDR_REQ_RECEIVED:
1569		req->device	= sidr_param->listen_id->device;
1570		req->port	= sidr_param->port;
1571		req->has_gid	= false;
1572		req->service_id	= sidr_param->service_id;
1573		req->pkey	= sidr_param->pkey;
1574		if (req->pkey != sidr_param->bth_pkey)
1575			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
1576					    "RDMA CMA: in the future this may cause the request to be dropped\n",
1577					    sidr_param->bth_pkey, req->pkey);
1578		break;
1579	default:
1580		return -EINVAL;
1581	}
1582
1583	return 0;
1584}
1585
1586static bool validate_ipv4_net_dev(struct net_device *net_dev,
1587				  const struct sockaddr_in *dst_addr,
1588				  const struct sockaddr_in *src_addr)
1589{
1590	__be32 daddr = dst_addr->sin_addr.s_addr,
1591	       saddr = src_addr->sin_addr.s_addr;
1592	struct fib_result res;
1593	struct flowi4 fl4;
1594	int err;
1595	bool ret;
1596
1597	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1598	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
1599	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
1600	    ipv4_is_loopback(saddr))
1601		return false;
1602
1603	memset(&fl4, 0, sizeof(fl4));
1604	fl4.flowi4_oif = net_dev->ifindex;
1605	fl4.daddr = daddr;
1606	fl4.saddr = saddr;
1607
1608	rcu_read_lock();
1609	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1610	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1611	rcu_read_unlock();
1612
1613	return ret;
1614}
1615
1616static bool validate_ipv6_net_dev(struct net_device *net_dev,
1617				  const struct sockaddr_in6 *dst_addr,
1618				  const struct sockaddr_in6 *src_addr)
1619{
1620#if IS_ENABLED(CONFIG_IPV6)
1621	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
1622			   IPV6_ADDR_LINKLOCAL;
1623	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
1624					 &src_addr->sin6_addr, net_dev->ifindex,
1625					 NULL, strict);
1626	bool ret;
1627
1628	if (!rt)
1629		return false;
1630
1631	ret = rt->rt6i_idev->dev == net_dev;
1632	ip6_rt_put(rt);
1633
1634	return ret;
1635#else
1636	return false;
1637#endif
1638}
1639
1640static bool validate_net_dev(struct net_device *net_dev,
1641			     const struct sockaddr *daddr,
1642			     const struct sockaddr *saddr)
1643{
1644	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
1645	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
1646	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1647	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
1648
1649	switch (daddr->sa_family) {
1650	case AF_INET:
1651		return saddr->sa_family == AF_INET &&
1652		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1653
1654	case AF_INET6:
1655		return saddr->sa_family == AF_INET6 &&
1656		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1657
1658	default:
1659		return false;
1660	}
1661}
1662
1663static struct net_device *
1664roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
1665{
1666	const struct ib_gid_attr *sgid_attr = NULL;
1667	struct net_device *ndev;
1668
1669	if (ib_event->event == IB_CM_REQ_RECEIVED)
1670		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
1671	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1672		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
1673
1674	if (!sgid_attr)
1675		return NULL;
1676
1677	rcu_read_lock();
1678	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
1679	if (IS_ERR(ndev))
1680		ndev = NULL;
1681	else
1682		dev_hold(ndev);
1683	rcu_read_unlock();
1684	return ndev;
1685}
1686
1687static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
1688					  struct cma_req_info *req)
1689{
1690	struct sockaddr *listen_addr =
1691			(struct sockaddr *)&req->listen_addr_storage;
1692	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
1693	struct net_device *net_dev;
1694	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
1695	int err;
1696
1697	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
1698			       req->service_id);
1699	if (err)
1700		return ERR_PTR(err);
1701
1702	if (rdma_protocol_roce(req->device, req->port))
1703		net_dev = roce_get_net_dev_by_cm_event(ib_event);
1704	else
1705		net_dev = ib_get_net_dev_by_params(req->device, req->port,
1706						   req->pkey,
1707						   gid, listen_addr);
1708	if (!net_dev)
1709		return ERR_PTR(-ENODEV);
1710
1711	return net_dev;
1712}
1713
1714static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
1715{
1716	return (be64_to_cpu(service_id) >> 16) & 0xffff;
1717}
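/*
 * Worked example of the decoding above and in cma_port_from_service_id():
 * for the IP-based port spaces the service id carries the port space in bits
 * 31:16 and the port in bits 15:0, so a service id of 0x0106138b decodes to
 * port space 0x0106 (RDMA_PS_TCP) and port 0x138b (5003).
 */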
1718
1719static bool cma_match_private_data(struct rdma_id_private *id_priv,
1720				   const struct cma_hdr *hdr)
1721{
1722	struct sockaddr *addr = cma_src_addr(id_priv);
1723	__be32 ip4_addr;
1724	struct in6_addr ip6_addr;
1725
1726	if (cma_any_addr(addr) && !id_priv->afonly)
1727		return true;
1728
1729	switch (addr->sa_family) {
1730	case AF_INET:
1731		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
1732		if (cma_get_ip_ver(hdr) != 4)
1733			return false;
1734		if (!cma_any_addr(addr) &&
1735		    hdr->dst_addr.ip4.addr != ip4_addr)
1736			return false;
1737		break;
1738	case AF_INET6:
1739		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
1740		if (cma_get_ip_ver(hdr) != 6)
1741			return false;
1742		if (!cma_any_addr(addr) &&
1743		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
1744			return false;
1745		break;
1746	case AF_IB:
1747		return true;
1748	default:
1749		return false;
1750	}
1751
1752	return true;
1753}
1754
1755static bool cma_protocol_roce(const struct rdma_cm_id *id)
1756{
1757	struct ib_device *device = id->device;
1758	const u32 port_num = id->port_num ?: rdma_start_port(device);
1759
1760	return rdma_protocol_roce(device, port_num);
1761}
1762
1763static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
1764{
1765	const struct sockaddr *daddr =
1766			(const struct sockaddr *)&req->listen_addr_storage;
1767	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1768
1769	/* Returns true if the req is for IPv6 link local */
1770	return (daddr->sa_family == AF_INET6 &&
1771		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
1772}
1773
1774static bool cma_match_net_dev(const struct rdma_cm_id *id,
1775			      const struct net_device *net_dev,
1776			      const struct cma_req_info *req)
1777{
1778	const struct rdma_addr *addr = &id->route.addr;
1779
1780	if (!net_dev)
1781		/* This request is an AF_IB request */
1782		return (!id->port_num || id->port_num == req->port) &&
1783		       (addr->src_addr.ss_family == AF_IB);
1784
1785	/*
1786	 * If the request is not for an IPv6 link-local address, allow matching
1787	 * the request to any netdevice of the single- or multi-port rdma device.
1788	 */
1789	if (!cma_is_req_ipv6_ll(req))
1790		return true;
1791	/*
1792	 * Net namespaces must match, and if the listener is listening
1793	 * on a specific netdevice then that netdevice must match as well.
1794	 */
1795	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1796	    (!!addr->dev_addr.bound_dev_if ==
1797	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1798		return true;
1799	else
1800		return false;
1801}
1802
1803static struct rdma_id_private *cma_find_listener(
1804		const struct rdma_bind_list *bind_list,
1805		const struct ib_cm_id *cm_id,
1806		const struct ib_cm_event *ib_event,
1807		const struct cma_req_info *req,
1808		const struct net_device *net_dev)
1809{
1810	struct rdma_id_private *id_priv, *id_priv_dev;
1811
1812	lockdep_assert_held(&lock);
1813
1814	if (!bind_list)
1815		return ERR_PTR(-EINVAL);
1816
1817	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1818		if (cma_match_private_data(id_priv, ib_event->private_data)) {
1819			if (id_priv->id.device == cm_id->device &&
1820			    cma_match_net_dev(&id_priv->id, net_dev, req))
1821				return id_priv;
1822			list_for_each_entry(id_priv_dev,
1823					    &id_priv->listen_list,
1824					    listen_item) {
1825				if (id_priv_dev->id.device == cm_id->device &&
1826				    cma_match_net_dev(&id_priv_dev->id,
1827						      net_dev, req))
1828					return id_priv_dev;
1829			}
1830		}
1831	}
1832
1833	return ERR_PTR(-EINVAL);
1834}
1835
1836static struct rdma_id_private *
1837cma_ib_id_from_event(struct ib_cm_id *cm_id,
1838		     const struct ib_cm_event *ib_event,
1839		     struct cma_req_info *req,
1840		     struct net_device **net_dev)
1841{
1842	struct rdma_bind_list *bind_list;
1843	struct rdma_id_private *id_priv;
1844	int err;
1845
1846	err = cma_save_req_info(ib_event, req);
1847	if (err)
1848		return ERR_PTR(err);
1849
1850	*net_dev = cma_get_net_dev(ib_event, req);
1851	if (IS_ERR(*net_dev)) {
1852		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
1853			/* Assuming the protocol is AF_IB */
1854			*net_dev = NULL;
1855		} else {
1856			return ERR_CAST(*net_dev);
1857		}
1858	}
1859
1860	mutex_lock(&lock);
1861	/*
1862	 * The net namespace might be getting deleted while the route lookup
1863	 * and cm_id lookup are in progress. Therefore, perform the netdevice
1864	 * validation and the cm_id lookup under the rcu lock.
1865	 * The rcu lock, along with the netdevice state check, synchronizes with
1866	 * a netdevice migrating to a different net namespace and also ensures
1867	 * that the net namespace is not deleted while the lookup is in
1868	 * progress.
1869	 * If the device state is not IFF_UP, its properties such as ifindex
1870	 * and nd_net cannot be trusted to remain valid without the rcu lock.
1871	 * net/core/dev.c change_net_namespace() synchronizes with ongoing
1872	 * operations on the net device after the device is closed, using
1873	 * synchronize_net().
1874	 */
1875	rcu_read_lock();
1876	if (*net_dev) {
1877		/*
1878		 * If the netdevice is down, it is likely administratively
1879		 * down or migrating to a different namespace.
1880		 * In that case avoid further processing, as the net namespace
1881		 * or ifindex may change.
1882		 */
1883		if (((*net_dev)->flags & IFF_UP) == 0) {
1884			id_priv = ERR_PTR(-EHOSTUNREACH);
1885			goto err;
1886		}
1887
1888		if (!validate_net_dev(*net_dev,
1889				 (struct sockaddr *)&req->src_addr_storage,
1890				 (struct sockaddr *)&req->listen_addr_storage)) {
1891			id_priv = ERR_PTR(-EHOSTUNREACH);
1892			goto err;
1893		}
1894	}
1895
1896	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
1897				rdma_ps_from_service_id(req->service_id),
1898				cma_port_from_service_id(req->service_id));
1899	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
1900err:
1901	rcu_read_unlock();
1902	mutex_unlock(&lock);
1903	if (IS_ERR(id_priv) && *net_dev) {
1904		dev_put(*net_dev);
1905		*net_dev = NULL;
1906	}
1907	return id_priv;
1908}
1909
1910static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
1911{
1912	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1913}
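/*
 * Worked example of the offset above: AF_IB connections carry no cma_hdr, so
 * consumer private data starts at offset 0; for the IP-based port spaces it
 * follows the cma_hdr, i.e. sizeof(struct cma_hdr) == 36 bytes
 * (1 + 1 + 2 + 16 + 16, with no padding).
 */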
1914
1915static void cma_cancel_route(struct rdma_id_private *id_priv)
1916{
1917	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1918		if (id_priv->query)
1919			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
1920	}
1921}
1922
1923static void _cma_cancel_listens(struct rdma_id_private *id_priv)
1924{
1925	struct rdma_id_private *dev_id_priv;
1926
1927	lockdep_assert_held(&lock);
1928
1929	/*
1930	 * Remove from listen_any_list to prevent added devices from spawning
1931	 * additional listen requests.
1932	 */
1933	list_del_init(&id_priv->listen_any_item);
1934
1935	while (!list_empty(&id_priv->listen_list)) {
1936		dev_id_priv =
1937			list_first_entry(&id_priv->listen_list,
1938					 struct rdma_id_private, listen_item);
1939		/* sync with device removal to avoid duplicate destruction */
1940		list_del_init(&dev_id_priv->device_item);
1941		list_del_init(&dev_id_priv->listen_item);
1942		mutex_unlock(&lock);
1943
1944		rdma_destroy_id(&dev_id_priv->id);
1945		mutex_lock(&lock);
1946	}
1947}
1948
1949static void cma_cancel_listens(struct rdma_id_private *id_priv)
1950{
1951	mutex_lock(&lock);
1952	_cma_cancel_listens(id_priv);
1953	mutex_unlock(&lock);
1954}
1955
1956static void cma_cancel_operation(struct rdma_id_private *id_priv,
1957				 enum rdma_cm_state state)
1958{
1959	switch (state) {
1960	case RDMA_CM_ADDR_QUERY:
1961		/*
1962		 * We can skip the rdma_addr_cancel() based on state: only
1963		 * RDMA_CM_ADDR_QUERY has outstanding work that could still
1964		 * execute. Note that the addr_handler work could still be
1965		 * exiting outside this state; however, due to the interaction
1966		 * with the handler_mutex, the work is guaranteed not to touch
1967		 * id_priv during exit.
1968		 */
1969		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1970		break;
1971	case RDMA_CM_ROUTE_QUERY:
1972		cma_cancel_route(id_priv);
1973		break;
1974	case RDMA_CM_LISTEN:
1975		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1976			cma_cancel_listens(id_priv);
1977		break;
1978	default:
1979		break;
1980	}
1981}
1982
1983static void cma_release_port(struct rdma_id_private *id_priv)
1984{
1985	struct rdma_bind_list *bind_list = id_priv->bind_list;
1986	struct net *net = id_priv->id.route.addr.dev_addr.net;
1987
1988	if (!bind_list)
1989		return;
1990
1991	mutex_lock(&lock);
1992	hlist_del(&id_priv->node);
1993	if (hlist_empty(&bind_list->owners)) {
1994		cma_ps_remove(net, bind_list->ps, bind_list->port);
1995		kfree(bind_list);
1996	}
1997	mutex_unlock(&lock);
1998}
1999
2000static void destroy_mc(struct rdma_id_private *id_priv,
2001		       struct cma_multicast *mc)
2002{
2003	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
2004
2005	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
2006		ib_sa_free_multicast(mc->sa_mc);
2007
2008	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
2009		struct rdma_dev_addr *dev_addr =
2010			&id_priv->id.route.addr.dev_addr;
2011		struct net_device *ndev = NULL;
2012
2013		if (dev_addr->bound_dev_if)
2014			ndev = dev_get_by_index(dev_addr->net,
2015						dev_addr->bound_dev_if);
2016		if (ndev && !send_only) {
2017			enum ib_gid_type gid_type;
2018			union ib_gid mgid;
2019
2020			gid_type = id_priv->cma_dev->default_gid_type
2021					   [id_priv->id.port_num -
2022					    rdma_start_port(
2023						    id_priv->cma_dev->device)];
2024			cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
2025					  gid_type);
2026			cma_igmp_send(ndev, &mgid, false);
2027		}
2028		dev_put(ndev);
2029
2030		cancel_work_sync(&mc->iboe_join.work);
2031	}
2032	kfree(mc);
2033}
2034
2035static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
2036{
2037	struct cma_multicast *mc;
2038
2039	while (!list_empty(&id_priv->mc_list)) {
2040		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
2041				      list);
2042		list_del(&mc->list);
2043		destroy_mc(id_priv, mc);
2044	}
2045}
2046
2047static void _destroy_id(struct rdma_id_private *id_priv,
2048			enum rdma_cm_state state)
2049{
2050	cma_cancel_operation(id_priv, state);
2051
2052	rdma_restrack_del(&id_priv->res);
2053	cma_remove_id_from_tree(id_priv);
2054	if (id_priv->cma_dev) {
2055		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
2056			if (id_priv->cm_id.ib)
2057				ib_destroy_cm_id(id_priv->cm_id.ib);
2058		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
2059			if (id_priv->cm_id.iw)
2060				iw_destroy_cm_id(id_priv->cm_id.iw);
2061		}
2062		cma_leave_mc_groups(id_priv);
2063		cma_release_dev(id_priv);
2064	}
2065
2066	cma_release_port(id_priv);
2067	cma_id_put(id_priv);
2068	wait_for_completion(&id_priv->comp);
2069
2070	if (id_priv->internal_id)
2071		cma_id_put(id_priv->id.context);
2072
2073	kfree(id_priv->id.route.path_rec);
2074	kfree(id_priv->id.route.path_rec_inbound);
2075	kfree(id_priv->id.route.path_rec_outbound);
2076
2077	put_net(id_priv->id.route.addr.dev_addr.net);
2078	kfree(id_priv);
2079}
2080
2081/*
2082 * destroy an ID from within the handler_mutex. This ensures that no other
2083 * handlers can start running concurrently.
2084 */
2085static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
2086	__releases(&id_priv->handler_mutex)
2087{
2088	enum rdma_cm_state state;
2089	unsigned long flags;
2090
2091	trace_cm_id_destroy(id_priv);
2092
2093	/*
2094	 * Setting the state to destroyed under the handler mutex provides a
2095	 * fence against calling handler callbacks. If this is invoked due to
2096	 * the failure of a handler callback then it guarantees that no future
2097	 * handlers will be called.
2098	 */
2099	lockdep_assert_held(&id_priv->handler_mutex);
2100	spin_lock_irqsave(&id_priv->lock, flags);
2101	state = id_priv->state;
2102	id_priv->state = RDMA_CM_DESTROYING;
2103	spin_unlock_irqrestore(&id_priv->lock, flags);
2104	mutex_unlock(&id_priv->handler_mutex);
2105	_destroy_id(id_priv, state);
2106}
2107
2108void rdma_destroy_id(struct rdma_cm_id *id)
2109{
2110	struct rdma_id_private *id_priv =
2111		container_of(id, struct rdma_id_private, id);
2112
2113	mutex_lock(&id_priv->handler_mutex);
2114	destroy_id_handler_unlock(id_priv);
2115}
2116EXPORT_SYMBOL(rdma_destroy_id);
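/*
 * Illustrative sketch (assumed consumer code, not part of this file): a ULP
 * has two ways to tear down a cm_id.  From ordinary process context it calls
 * rdma_destroy_id() directly; from inside its own event handler it must
 * instead return a non-zero value, which makes the CMA core invoke
 * destroy_id_handler_unlock() on its behalf.  Calling rdma_destroy_id() from
 * the handler would deadlock on handler_mutex, which is already held there.
 *
 *	static int example_handler(struct rdma_cm_id *id,
 *				   struct rdma_cm_event *event)
 *	{
 *		if (event->event == RDMA_CM_EVENT_REJECTED)
 *			return -ECONNRESET;	// CMA destroys the id for us
 *		return 0;			// keep the id alive
 *	}
 */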
2117
2118static int cma_rep_recv(struct rdma_id_private *id_priv)
2119{
2120	int ret;
2121
2122	ret = cma_modify_qp_rtr(id_priv, NULL);
2123	if (ret)
2124		goto reject;
2125
2126	ret = cma_modify_qp_rts(id_priv, NULL);
2127	if (ret)
2128		goto reject;
2129
2130	trace_cm_send_rtu(id_priv);
2131	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
2132	if (ret)
2133		goto reject;
2134
2135	return 0;
2136reject:
2137	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
2138	cma_modify_qp_err(id_priv);
2139	trace_cm_send_rej(id_priv);
2140	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
2141		       NULL, 0, NULL, 0);
2142	return ret;
2143}
2144
2145static void cma_set_rep_event_data(struct rdma_cm_event *event,
2146				   const struct ib_cm_rep_event_param *rep_data,
2147				   void *private_data)
2148{
2149	event->param.conn.private_data = private_data;
2150	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
2151	event->param.conn.responder_resources = rep_data->responder_resources;
2152	event->param.conn.initiator_depth = rep_data->initiator_depth;
2153	event->param.conn.flow_control = rep_data->flow_control;
2154	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
2155	event->param.conn.srq = rep_data->srq;
2156	event->param.conn.qp_num = rep_data->remote_qpn;
2157
2158	event->ece.vendor_id = rep_data->ece.vendor_id;
2159	event->ece.attr_mod = rep_data->ece.attr_mod;
2160}
2161
2162static int cma_cm_event_handler(struct rdma_id_private *id_priv,
2163				struct rdma_cm_event *event)
2164{
2165	int ret;
2166
2167	lockdep_assert_held(&id_priv->handler_mutex);
2168
2169	trace_cm_event_handler(id_priv, event);
2170	ret = id_priv->id.event_handler(&id_priv->id, event);
2171	trace_cm_event_done(id_priv, event, ret);
2172	return ret;
2173}
2174
2175static int cma_ib_handler(struct ib_cm_id *cm_id,
2176			  const struct ib_cm_event *ib_event)
2177{
2178	struct rdma_id_private *id_priv = cm_id->context;
2179	struct rdma_cm_event event = {};
2180	enum rdma_cm_state state;
2181	int ret;
2182
2183	mutex_lock(&id_priv->handler_mutex);
2184	state = READ_ONCE(id_priv->state);
2185	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
2186	     state != RDMA_CM_CONNECT) ||
2187	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
2188	     state != RDMA_CM_DISCONNECT))
2189		goto out;
2190
2191	switch (ib_event->event) {
2192	case IB_CM_REQ_ERROR:
2193	case IB_CM_REP_ERROR:
2194		event.event = RDMA_CM_EVENT_UNREACHABLE;
2195		event.status = -ETIMEDOUT;
2196		break;
2197	case IB_CM_REP_RECEIVED:
2198		if (state == RDMA_CM_CONNECT &&
2199		    (id_priv->id.qp_type != IB_QPT_UD)) {
2200			trace_cm_send_mra(id_priv);
2201			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2202		}
2203		if (id_priv->id.qp) {
2204			event.status = cma_rep_recv(id_priv);
2205			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
2206						     RDMA_CM_EVENT_ESTABLISHED;
2207		} else {
2208			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
2209		}
2210		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
2211				       ib_event->private_data);
2212		break;
2213	case IB_CM_RTU_RECEIVED:
2214	case IB_CM_USER_ESTABLISHED:
2215		event.event = RDMA_CM_EVENT_ESTABLISHED;
2216		break;
2217	case IB_CM_DREQ_ERROR:
2218		event.status = -ETIMEDOUT;
2219		fallthrough;
2220	case IB_CM_DREQ_RECEIVED:
2221	case IB_CM_DREP_RECEIVED:
2222		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
2223				   RDMA_CM_DISCONNECT))
2224			goto out;
2225		event.event = RDMA_CM_EVENT_DISCONNECTED;
2226		break;
2227	case IB_CM_TIMEWAIT_EXIT:
2228		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
2229		break;
2230	case IB_CM_MRA_RECEIVED:
2231		/* ignore event */
2232		goto out;
2233	case IB_CM_REJ_RECEIVED:
2234		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
2235										ib_event->param.rej_rcvd.reason));
2236		cma_modify_qp_err(id_priv);
2237		event.status = ib_event->param.rej_rcvd.reason;
2238		event.event = RDMA_CM_EVENT_REJECTED;
2239		event.param.conn.private_data = ib_event->private_data;
2240		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
2241		break;
2242	default:
2243		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
2244		       ib_event->event);
2245		goto out;
2246	}
2247
2248	ret = cma_cm_event_handler(id_priv, &event);
2249	if (ret) {
2250		/* Destroy the CM ID by returning a non-zero value. */
2251		id_priv->cm_id.ib = NULL;
2252		destroy_id_handler_unlock(id_priv);
2253		return ret;
2254	}
2255out:
2256	mutex_unlock(&id_priv->handler_mutex);
2257	return 0;
2258}
2259
2260static struct rdma_id_private *
2261cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
2262		   const struct ib_cm_event *ib_event,
2263		   struct net_device *net_dev)
2264{
2265	struct rdma_id_private *listen_id_priv;
2266	struct rdma_id_private *id_priv;
2267	struct rdma_cm_id *id;
2268	struct rdma_route *rt;
2269	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2270	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
2271	const __be64 service_id =
2272		ib_event->param.req_rcvd.primary_path->service_id;
2273	int ret;
2274
2275	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2276	id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
2277				   listen_id->event_handler, listen_id->context,
2278				   listen_id->ps,
2279				   ib_event->param.req_rcvd.qp_type,
2280				   listen_id_priv);
2281	if (IS_ERR(id_priv))
2282		return NULL;
2283
2284	id = &id_priv->id;
2285	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2286			      (struct sockaddr *)&id->route.addr.dst_addr,
2287			      listen_id, ib_event, ss_family, service_id))
2288		goto err;
2289
2290	rt = &id->route;
2291	rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
2292	rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
2293				     sizeof(*rt->path_rec), GFP_KERNEL);
2294	if (!rt->path_rec)
2295		goto err;
2296
2297	rt->path_rec[0] = *path;
2298	if (rt->num_pri_alt_paths == 2)
2299		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
2300
2301	if (net_dev) {
2302		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
2303	} else {
2304		if (!cma_protocol_roce(listen_id) &&
2305		    cma_any_addr(cma_src_addr(id_priv))) {
2306			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
2307			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
2308			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
2309		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
2310			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
2311			if (ret)
2312				goto err;
2313		}
2314	}
2315	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
2316
2317	id_priv->state = RDMA_CM_CONNECT;
2318	return id_priv;
2319
2320err:
2321	rdma_destroy_id(id);
2322	return NULL;
2323}
2324
2325static struct rdma_id_private *
2326cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
2327		  const struct ib_cm_event *ib_event,
2328		  struct net_device *net_dev)
2329{
2330	const struct rdma_id_private *listen_id_priv;
2331	struct rdma_id_private *id_priv;
2332	struct rdma_cm_id *id;
2333	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2334	struct net *net = listen_id->route.addr.dev_addr.net;
2335	int ret;
2336
2337	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2338	id_priv = __rdma_create_id(net, listen_id->event_handler,
2339				   listen_id->context, listen_id->ps, IB_QPT_UD,
2340				   listen_id_priv);
2341	if (IS_ERR(id_priv))
2342		return NULL;
2343
2344	id = &id_priv->id;
2345	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2346			      (struct sockaddr *)&id->route.addr.dst_addr,
2347			      listen_id, ib_event, ss_family,
2348			      ib_event->param.sidr_req_rcvd.service_id))
2349		goto err;
2350
2351	if (net_dev) {
2352		rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
2353	} else {
2354		if (!cma_any_addr(cma_src_addr(id_priv))) {
2355			ret = cma_translate_addr(cma_src_addr(id_priv),
2356						 &id->route.addr.dev_addr);
2357			if (ret)
2358				goto err;
2359		}
2360	}
2361
2362	id_priv->state = RDMA_CM_CONNECT;
2363	return id_priv;
2364err:
2365	rdma_destroy_id(id);
2366	return NULL;
2367}
2368
2369static void cma_set_req_event_data(struct rdma_cm_event *event,
2370				   const struct ib_cm_req_event_param *req_data,
2371				   void *private_data, int offset)
2372{
2373	event->param.conn.private_data = private_data + offset;
2374	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
2375	event->param.conn.responder_resources = req_data->responder_resources;
2376	event->param.conn.initiator_depth = req_data->initiator_depth;
2377	event->param.conn.flow_control = req_data->flow_control;
2378	event->param.conn.retry_count = req_data->retry_count;
2379	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
2380	event->param.conn.srq = req_data->srq;
2381	event->param.conn.qp_num = req_data->remote_qpn;
2382
2383	event->ece.vendor_id = req_data->ece.vendor_id;
2384	event->ece.attr_mod = req_data->ece.attr_mod;
2385}
2386
2387static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
2388				    const struct ib_cm_event *ib_event)
2389{
2390	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
2391		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
2392		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
2393		 (id->qp_type == IB_QPT_UD)) ||
2394		(!id->qp_type));
2395}
2396
2397static int cma_ib_req_handler(struct ib_cm_id *cm_id,
2398			      const struct ib_cm_event *ib_event)
2399{
2400	struct rdma_id_private *listen_id, *conn_id = NULL;
2401	struct rdma_cm_event event = {};
2402	struct cma_req_info req = {};
2403	struct net_device *net_dev;
2404	u8 offset;
2405	int ret;
2406
2407	listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
2408	if (IS_ERR(listen_id))
2409		return PTR_ERR(listen_id);
2410
2411	trace_cm_req_handler(listen_id, ib_event->event);
2412	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
2413		ret = -EINVAL;
2414		goto net_dev_put;
2415	}
2416
2417	mutex_lock(&listen_id->handler_mutex);
2418	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
2419		ret = -ECONNABORTED;
2420		goto err_unlock;
2421	}
2422
2423	offset = cma_user_data_offset(listen_id);
2424	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2425	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
2426		conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
2427		event.param.ud.private_data = ib_event->private_data + offset;
2428		event.param.ud.private_data_len =
2429				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
2430	} else {
2431		conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
2432		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
2433				       ib_event->private_data, offset);
2434	}
2435	if (!conn_id) {
2436		ret = -ENOMEM;
2437		goto err_unlock;
2438	}
2439
2440	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2441	ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
2442	if (ret) {
2443		destroy_id_handler_unlock(conn_id);
2444		goto err_unlock;
2445	}
2446
2447	conn_id->cm_id.ib = cm_id;
2448	cm_id->context = conn_id;
2449	cm_id->cm_handler = cma_ib_handler;
2450
2451	ret = cma_cm_event_handler(conn_id, &event);
2452	if (ret) {
2453		/* Destroy the CM ID by returning a non-zero value. */
2454		conn_id->cm_id.ib = NULL;
2455		mutex_unlock(&listen_id->handler_mutex);
2456		destroy_id_handler_unlock(conn_id);
2457		goto net_dev_put;
2458	}
2459
2460	if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
2461	    conn_id->id.qp_type != IB_QPT_UD) {
2462		trace_cm_send_mra(cm_id->context);
2463		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2464	}
2465	mutex_unlock(&conn_id->handler_mutex);
2466
2467err_unlock:
2468	mutex_unlock(&listen_id->handler_mutex);
2469
2470net_dev_put:
2471	dev_put(net_dev);
2472
2473	return ret;
2474}
2475
2476__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
2477{
2478	if (addr->sa_family == AF_IB)
2479		return ((struct sockaddr_ib *) addr)->sib_sid;
2480
2481	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
2482}
2483EXPORT_SYMBOL(rdma_get_service_id);
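/*
 * Worked example (illustrative): for IP addresses the service ID packs the
 * port space into the upper bits and the port number into the low 16 bits.
 * With ps = RDMA_PS_TCP (0x0106) and a port of 0x1234, the returned value is
 * cpu_to_be64(0x01061234).  For AF_IB the caller-supplied sib_sid is used
 * unchanged.
 */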
2484
2485void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
2486		    union ib_gid *dgid)
2487{
2488	struct rdma_addr *addr = &cm_id->route.addr;
2489
2490	if (!cm_id->device) {
2491		if (sgid)
2492			memset(sgid, 0, sizeof(*sgid));
2493		if (dgid)
2494			memset(dgid, 0, sizeof(*dgid));
2495		return;
2496	}
2497
2498	if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
2499		if (sgid)
2500			rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
2501		if (dgid)
2502			rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
2503	} else {
2504		if (sgid)
2505			rdma_addr_get_sgid(&addr->dev_addr, sgid);
2506		if (dgid)
2507			rdma_addr_get_dgid(&addr->dev_addr, dgid);
2508	}
2509}
2510EXPORT_SYMBOL(rdma_read_gids);
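/*
 * Usage sketch (assumed consumer code, not part of this file): a ULP can
 * fetch the GID pair of an id, e.g. for logging.  For RoCE the GIDs are
 * derived from the IP addresses; for IB/OPA they come from the resolved
 * device address.
 *
 *	union ib_gid sgid, dgid;
 *
 *	rdma_read_gids(id, &sgid, &dgid);
 *	pr_debug("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
 */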
2511
2512static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2513{
2514	struct rdma_id_private *id_priv = iw_id->context;
2515	struct rdma_cm_event event = {};
2516	int ret = 0;
2517	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2518	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2519
2520	mutex_lock(&id_priv->handler_mutex);
2521	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
2522		goto out;
2523
2524	switch (iw_event->event) {
2525	case IW_CM_EVENT_CLOSE:
2526		event.event = RDMA_CM_EVENT_DISCONNECTED;
2527		break;
2528	case IW_CM_EVENT_CONNECT_REPLY:
2529		memcpy(cma_src_addr(id_priv), laddr,
2530		       rdma_addr_size(laddr));
2531		memcpy(cma_dst_addr(id_priv), raddr,
2532		       rdma_addr_size(raddr));
2533		switch (iw_event->status) {
2534		case 0:
2535			event.event = RDMA_CM_EVENT_ESTABLISHED;
2536			event.param.conn.initiator_depth = iw_event->ird;
2537			event.param.conn.responder_resources = iw_event->ord;
2538			break;
2539		case -ECONNRESET:
2540		case -ECONNREFUSED:
2541			event.event = RDMA_CM_EVENT_REJECTED;
2542			break;
2543		case -ETIMEDOUT:
2544			event.event = RDMA_CM_EVENT_UNREACHABLE;
2545			break;
2546		default:
2547			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
2548			break;
2549		}
2550		break;
2551	case IW_CM_EVENT_ESTABLISHED:
2552		event.event = RDMA_CM_EVENT_ESTABLISHED;
2553		event.param.conn.initiator_depth = iw_event->ird;
2554		event.param.conn.responder_resources = iw_event->ord;
2555		break;
2556	default:
2557		goto out;
2558	}
2559
2560	event.status = iw_event->status;
2561	event.param.conn.private_data = iw_event->private_data;
2562	event.param.conn.private_data_len = iw_event->private_data_len;
2563	ret = cma_cm_event_handler(id_priv, &event);
2564	if (ret) {
2565		/* Destroy the CM ID by returning a non-zero value. */
2566		id_priv->cm_id.iw = NULL;
2567		destroy_id_handler_unlock(id_priv);
2568		return ret;
2569	}
2570
2571out:
2572	mutex_unlock(&id_priv->handler_mutex);
2573	return ret;
2574}
2575
2576static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2577			       struct iw_cm_event *iw_event)
2578{
2579	struct rdma_id_private *listen_id, *conn_id;
2580	struct rdma_cm_event event = {};
2581	int ret = -ECONNABORTED;
2582	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2583	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2584
2585	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2586	event.param.conn.private_data = iw_event->private_data;
2587	event.param.conn.private_data_len = iw_event->private_data_len;
2588	event.param.conn.initiator_depth = iw_event->ird;
2589	event.param.conn.responder_resources = iw_event->ord;
2590
2591	listen_id = cm_id->context;
2592
2593	mutex_lock(&listen_id->handler_mutex);
2594	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
2595		goto out;
2596
2597	/* Create a new RDMA id for the new IW CM ID */
2598	conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2599				   listen_id->id.event_handler,
2600				   listen_id->id.context, RDMA_PS_TCP,
2601				   IB_QPT_RC, listen_id);
2602	if (IS_ERR(conn_id)) {
2603		ret = -ENOMEM;
2604		goto out;
2605	}
2606	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2607	conn_id->state = RDMA_CM_CONNECT;
2608
2609	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2610	if (ret) {
2611		mutex_unlock(&listen_id->handler_mutex);
2612		destroy_id_handler_unlock(conn_id);
2613		return ret;
2614	}
2615
2616	ret = cma_iw_acquire_dev(conn_id, listen_id);
2617	if (ret) {
2618		mutex_unlock(&listen_id->handler_mutex);
2619		destroy_id_handler_unlock(conn_id);
2620		return ret;
2621	}
2622
2623	conn_id->cm_id.iw = cm_id;
2624	cm_id->context = conn_id;
2625	cm_id->cm_handler = cma_iw_handler;
2626
2627	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
2628	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
2629
2630	ret = cma_cm_event_handler(conn_id, &event);
2631	if (ret) {
2632		/* User wants to destroy the CM ID */
2633		conn_id->cm_id.iw = NULL;
2634		mutex_unlock(&listen_id->handler_mutex);
2635		destroy_id_handler_unlock(conn_id);
2636		return ret;
2637	}
2638
2639	mutex_unlock(&conn_id->handler_mutex);
2640
2641out:
2642	mutex_unlock(&listen_id->handler_mutex);
2643	return ret;
2644}
2645
2646static int cma_ib_listen(struct rdma_id_private *id_priv)
2647{
2648	struct sockaddr *addr;
2649	struct ib_cm_id	*id;
2650	__be64 svc_id;
2651
2652	addr = cma_src_addr(id_priv);
2653	svc_id = rdma_get_service_id(&id_priv->id, addr);
2654	id = ib_cm_insert_listen(id_priv->id.device,
2655				 cma_ib_req_handler, svc_id);
2656	if (IS_ERR(id))
2657		return PTR_ERR(id);
2658	id_priv->cm_id.ib = id;
2659
2660	return 0;
2661}
2662
2663static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
2664{
2665	int ret;
2666	struct iw_cm_id	*id;
2667
2668	id = iw_create_cm_id(id_priv->id.device,
2669			     iw_conn_req_handler,
2670			     id_priv);
2671	if (IS_ERR(id))
2672		return PTR_ERR(id);
2673
2674	mutex_lock(&id_priv->qp_mutex);
2675	id->tos = id_priv->tos;
2676	id->tos_set = id_priv->tos_set;
2677	mutex_unlock(&id_priv->qp_mutex);
2678	id->afonly = id_priv->afonly;
2679	id_priv->cm_id.iw = id;
2680
2681	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
2682	       rdma_addr_size(cma_src_addr(id_priv)));
2683
2684	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
2685
2686	if (ret) {
2687		iw_destroy_cm_id(id_priv->cm_id.iw);
2688		id_priv->cm_id.iw = NULL;
2689	}
2690
2691	return ret;
2692}
2693
2694static int cma_listen_handler(struct rdma_cm_id *id,
2695			      struct rdma_cm_event *event)
2696{
2697	struct rdma_id_private *id_priv = id->context;
2698
2699	/* Listening IDs are always destroyed on removal */
2700	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
2701		return -1;
2702
2703	id->context = id_priv->id.context;
2704	id->event_handler = id_priv->id.event_handler;
2705	trace_cm_event_handler(id_priv, event);
2706	return id_priv->id.event_handler(id, event);
2707}
2708
2709static int cma_listen_on_dev(struct rdma_id_private *id_priv,
2710			     struct cma_device *cma_dev,
2711			     struct rdma_id_private **to_destroy)
2712{
2713	struct rdma_id_private *dev_id_priv;
2714	struct net *net = id_priv->id.route.addr.dev_addr.net;
2715	int ret;
2716
2717	lockdep_assert_held(&lock);
2718
2719	*to_destroy = NULL;
2720	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2721		return 0;
2722
2723	dev_id_priv =
2724		__rdma_create_id(net, cma_listen_handler, id_priv,
2725				 id_priv->id.ps, id_priv->id.qp_type, id_priv);
2726	if (IS_ERR(dev_id_priv))
2727		return PTR_ERR(dev_id_priv);
2728
2729	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2730	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2731	       rdma_addr_size(cma_src_addr(id_priv)));
2732
2733	_cma_attach_to_dev(dev_id_priv, cma_dev);
2734	rdma_restrack_add(&dev_id_priv->res);
2735	cma_id_get(id_priv);
2736	dev_id_priv->internal_id = 1;
2737	dev_id_priv->afonly = id_priv->afonly;
2738	mutex_lock(&id_priv->qp_mutex);
2739	dev_id_priv->tos_set = id_priv->tos_set;
2740	dev_id_priv->tos = id_priv->tos;
2741	mutex_unlock(&id_priv->qp_mutex);
2742
2743	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
2744	if (ret)
2745		goto err_listen;
2746	list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
2747	return 0;
2748err_listen:
2749	/* Caller must destroy this after releasing lock */
2750	*to_destroy = dev_id_priv;
2751	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
2752	return ret;
2753}
2754
2755static int cma_listen_on_all(struct rdma_id_private *id_priv)
2756{
2757	struct rdma_id_private *to_destroy;
2758	struct cma_device *cma_dev;
2759	int ret;
2760
2761	mutex_lock(&lock);
2762	list_add_tail(&id_priv->listen_any_item, &listen_any_list);
2763	list_for_each_entry(cma_dev, &dev_list, list) {
2764		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
2765		if (ret) {
2766			/* Prevent racing with cma_process_remove() */
2767			if (to_destroy)
2768				list_del_init(&to_destroy->device_item);
2769			goto err_listen;
2770		}
2771	}
2772	mutex_unlock(&lock);
2773	return 0;
2774
2775err_listen:
2776	_cma_cancel_listens(id_priv);
2777	mutex_unlock(&lock);
2778	if (to_destroy)
2779		rdma_destroy_id(&to_destroy->id);
2780	return ret;
2781}
2782
2783void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2784{
2785	struct rdma_id_private *id_priv;
2786
2787	id_priv = container_of(id, struct rdma_id_private, id);
2788	mutex_lock(&id_priv->qp_mutex);
2789	id_priv->tos = (u8) tos;
2790	id_priv->tos_set = true;
2791	mutex_unlock(&id_priv->qp_mutex);
2792}
2793EXPORT_SYMBOL(rdma_set_service_type);
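/*
 * Usage sketch (assumed consumer code): the type of service should be set
 * before the route is resolved so it can influence the path (QoS class for
 * IB in cma_query_ib_route(), traffic class and SL for RoCE in
 * cma_resolve_iboe_route() below).
 *
 *	rdma_set_service_type(id, tos);
 *	ret = rdma_resolve_route(id, 2000);
 */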
2794
2795/**
2796 * rdma_set_ack_timeout() - Set the ack timeout of QP associated
2797 *                          with a connection identifier.
2798 * @id: Communication identifier associated with the QP.
2799 * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
2800 *
2801 * This function should be called before rdma_connect() on the active side,
2802 * and before rdma_accept() on the passive side. It is applicable to the
2803 * primary path only. The timeout affects the local side of the QP; it is
2804 * not negotiated with the remote side, and zero disables the timer. If it
2805 * is set before rdma_resolve_route(), the value is also used to determine
2806 * the PacketLifeTime for RoCE.
2807 *
2808 * Return: 0 for success
2809 */
2810int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
2811{
2812	struct rdma_id_private *id_priv;
2813
2814	if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
2815		return -EINVAL;
2816
2817	id_priv = container_of(id, struct rdma_id_private, id);
2818	mutex_lock(&id_priv->qp_mutex);
2819	id_priv->timeout = timeout;
2820	id_priv->timeout_set = true;
2821	mutex_unlock(&id_priv->qp_mutex);
2822
2823	return 0;
2824}
2825EXPORT_SYMBOL(rdma_set_ack_timeout);
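/*
 * Worked example (illustrative): a timeout value of 14 corresponds to
 * 4.096 usec * 2^14, roughly 67 msec of local ACK timeout.  If set before
 * route resolution, cma_resolve_iboe_route() below also derives the RoCE
 * PacketLifeTime from it as (timeout - 1), i.e. roughly half the ACK
 * timeout.
 *
 *	ret = rdma_set_ack_timeout(id, 14);
 *	if (!ret)
 *		ret = rdma_resolve_route(id, 2000);
 */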
2826
2827/**
2828 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
2829 *			      QP associated with a connection identifier.
2830 * @id: Communication identifier associated with the QP.
2831 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
2832 *		   Timer Field" in the IBTA specification.
2833 *
2834 * This function should be called before rdma_connect() on the active
2835 * side, and before rdma_accept() on the passive side. The timer value
2836 * will be associated with the local QP. When the QP receives a send it
2837 * is not ready to handle, typically because the receive queue is empty,
2838 * an RNR Retry NAK is returned to the requester with the min_rnr_timer
2839 * encoded. The requester will then wait at least the time specified
2840 * in the NAK before retrying. The default is zero, which translates
2841 * to a minimum RNR Timer value of 655 ms.
2842 *
2843 * Return: 0 for success
2844 */
2845int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
2846{
2847	struct rdma_id_private *id_priv;
2848
2849	/* It is a five-bit value */
2850	if (min_rnr_timer & 0xe0)
2851		return -EINVAL;
2852
2853	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
2854		return -EINVAL;
2855
2856	id_priv = container_of(id, struct rdma_id_private, id);
2857	mutex_lock(&id_priv->qp_mutex);
2858	id_priv->min_rnr_timer = min_rnr_timer;
2859	id_priv->min_rnr_timer_set = true;
2860	mutex_unlock(&id_priv->qp_mutex);
2861
2862	return 0;
2863}
2864EXPORT_SYMBOL(rdma_set_min_rnr_timer);
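/*
 * Usage sketch (assumed consumer code): the value must fit in 5 bits and is
 * interpreted per the IBTA RNR NAK timer encoding, not in plain time units.
 * A ULP that expects its receive queue to be refilled slowly might pick a
 * larger encoded value before accepting a connection:
 *
 *	ret = rdma_set_min_rnr_timer(id, 27);	// encoded value, 0..31
 *	if (!ret)
 *		ret = rdma_accept(id, &conn_param);
 */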
2865
2866static int route_set_path_rec_inbound(struct cma_work *work,
2867				      struct sa_path_rec *path_rec)
2868{
2869	struct rdma_route *route = &work->id->id.route;
2870
2871	if (!route->path_rec_inbound) {
2872		route->path_rec_inbound =
2873			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
2874		if (!route->path_rec_inbound)
2875			return -ENOMEM;
2876	}
2877
2878	*route->path_rec_inbound = *path_rec;
2879	return 0;
2880}
2881
2882static int route_set_path_rec_outbound(struct cma_work *work,
2883				       struct sa_path_rec *path_rec)
2884{
2885	struct rdma_route *route = &work->id->id.route;
2886
2887	if (!route->path_rec_outbound) {
2888		route->path_rec_outbound =
2889			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
2890		if (!route->path_rec_outbound)
2891			return -ENOMEM;
2892	}
2893
2894	*route->path_rec_outbound = *path_rec;
2895	return 0;
2896}
2897
2898static void cma_query_handler(int status, struct sa_path_rec *path_rec,
2899			      unsigned int num_prs, void *context)
2900{
2901	struct cma_work *work = context;
2902	struct rdma_route *route;
2903	int i;
2904
2905	route = &work->id->id.route;
2906
2907	if (status)
2908		goto fail;
2909
2910	for (i = 0; i < num_prs; i++) {
2911		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
2912			*route->path_rec = path_rec[i];
2913		else if (path_rec[i].flags & IB_PATH_INBOUND)
2914			status = route_set_path_rec_inbound(work, &path_rec[i]);
2915		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
2916			status = route_set_path_rec_outbound(work,
2917							     &path_rec[i]);
2918		else
2919			status = -EINVAL;
2920
2921		if (status)
2922			goto fail;
2923	}
2924
2925	route->num_pri_alt_paths = 1;
2926	queue_work(cma_wq, &work->work);
2927	return;
2928
2929fail:
2930	work->old_state = RDMA_CM_ROUTE_QUERY;
2931	work->new_state = RDMA_CM_ADDR_RESOLVED;
2932	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2933	work->event.status = status;
2934	pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
2935			     status);
2936	queue_work(cma_wq, &work->work);
2937}
2938
2939static int cma_query_ib_route(struct rdma_id_private *id_priv,
2940			      unsigned long timeout_ms, struct cma_work *work)
2941{
2942	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2943	struct sa_path_rec path_rec;
2944	ib_sa_comp_mask comp_mask;
2945	struct sockaddr_in6 *sin6;
2946	struct sockaddr_ib *sib;
2947
2948	memset(&path_rec, 0, sizeof path_rec);
2949
2950	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
2951		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
2952	else
2953		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
2954	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2955	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2956	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2957	path_rec.numb_path = 1;
2958	path_rec.reversible = 1;
2959	path_rec.service_id = rdma_get_service_id(&id_priv->id,
2960						  cma_dst_addr(id_priv));
2961
2962	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2963		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2964		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2965
2966	switch (cma_family(id_priv)) {
2967	case AF_INET:
2968		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2969		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2970		break;
2971	case AF_INET6:
2972		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2973		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2974		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2975		break;
2976	case AF_IB:
2977		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2978		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2979		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2980		break;
2981	}
2982
2983	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2984					       id_priv->id.port_num, &path_rec,
2985					       comp_mask, timeout_ms,
2986					       GFP_KERNEL, cma_query_handler,
2987					       work, &id_priv->query);
2988
2989	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2990}
2991
2992static void cma_iboe_join_work_handler(struct work_struct *work)
2993{
2994	struct cma_multicast *mc =
2995		container_of(work, struct cma_multicast, iboe_join.work);
2996	struct rdma_cm_event *event = &mc->iboe_join.event;
2997	struct rdma_id_private *id_priv = mc->id_priv;
2998	int ret;
2999
3000	mutex_lock(&id_priv->handler_mutex);
3001	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
3002	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
3003		goto out_unlock;
3004
3005	ret = cma_cm_event_handler(id_priv, event);
3006	WARN_ON(ret);
3007
3008out_unlock:
3009	mutex_unlock(&id_priv->handler_mutex);
3010	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
3011		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
3012}
3013
3014static void cma_work_handler(struct work_struct *_work)
3015{
3016	struct cma_work *work = container_of(_work, struct cma_work, work);
3017	struct rdma_id_private *id_priv = work->id;
3018
3019	mutex_lock(&id_priv->handler_mutex);
3020	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
3021	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
3022		goto out_unlock;
3023	if (work->old_state != 0 || work->new_state != 0) {
3024		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
3025			goto out_unlock;
3026	}
3027
3028	if (cma_cm_event_handler(id_priv, &work->event)) {
3029		cma_id_put(id_priv);
3030		destroy_id_handler_unlock(id_priv);
3031		goto out_free;
3032	}
3033
3034out_unlock:
3035	mutex_unlock(&id_priv->handler_mutex);
3036	cma_id_put(id_priv);
3037out_free:
3038	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
3039		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
3040	kfree(work);
3041}
3042
3043static void cma_init_resolve_route_work(struct cma_work *work,
3044					struct rdma_id_private *id_priv)
3045{
3046	work->id = id_priv;
3047	INIT_WORK(&work->work, cma_work_handler);
3048	work->old_state = RDMA_CM_ROUTE_QUERY;
3049	work->new_state = RDMA_CM_ROUTE_RESOLVED;
3050	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
3051}
3052
3053static void enqueue_resolve_addr_work(struct cma_work *work,
3054				      struct rdma_id_private *id_priv)
3055{
3056	/* Balances with cma_id_put() in cma_work_handler */
3057	cma_id_get(id_priv);
3058
3059	work->id = id_priv;
3060	INIT_WORK(&work->work, cma_work_handler);
3061	work->old_state = RDMA_CM_ADDR_QUERY;
3062	work->new_state = RDMA_CM_ADDR_RESOLVED;
3063	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3064
3065	queue_work(cma_wq, &work->work);
3066}
3067
3068static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
3069				unsigned long timeout_ms)
3070{
3071	struct rdma_route *route = &id_priv->id.route;
3072	struct cma_work *work;
3073	int ret;
3074
3075	work = kzalloc(sizeof *work, GFP_KERNEL);
3076	if (!work)
3077		return -ENOMEM;
3078
3079	cma_init_resolve_route_work(work, id_priv);
3080
3081	if (!route->path_rec)
3082		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
3083	if (!route->path_rec) {
3084		ret = -ENOMEM;
3085		goto err1;
3086	}
3087
3088	ret = cma_query_ib_route(id_priv, timeout_ms, work);
3089	if (ret)
3090		goto err2;
3091
3092	return 0;
3093err2:
3094	kfree(route->path_rec);
3095	route->path_rec = NULL;
3096err1:
3097	kfree(work);
3098	return ret;
3099}
3100
3101static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
3102					   unsigned long supported_gids,
3103					   enum ib_gid_type default_gid)
3104{
3105	if ((network_type == RDMA_NETWORK_IPV4 ||
3106	     network_type == RDMA_NETWORK_IPV6) &&
3107	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
3108		return IB_GID_TYPE_ROCE_UDP_ENCAP;
3109
3110	return default_gid;
3111}
3112
3113/*
3114 * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
3115 * path record type based on the GID type.
3116 * It also sets up other L2 fields of the path record, including the
3117 * destination MAC address and the netdev ifindex.
3118 * It returns the netdev of the bound interface for this path record entry.
3119 */
3120static struct net_device *
3121cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
3122{
3123	struct rdma_route *route = &id_priv->id.route;
3124	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
3125	struct rdma_addr *addr = &route->addr;
3126	unsigned long supported_gids;
3127	struct net_device *ndev;
3128
3129	if (!addr->dev_addr.bound_dev_if)
3130		return NULL;
3131
3132	ndev = dev_get_by_index(addr->dev_addr.net,
3133				addr->dev_addr.bound_dev_if);
3134	if (!ndev)
3135		return NULL;
3136
3137	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
3138						    id_priv->id.port_num);
3139	gid_type = cma_route_gid_type(addr->dev_addr.network,
3140				      supported_gids,
3141				      id_priv->gid_type);
3142	/* Use the hint from IP Stack to select GID Type */
3143	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
3144		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
3145	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
3146
3147	route->path_rec->roce.route_resolved = true;
3148	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
3149	return ndev;
3150}
3151
3152int rdma_set_ib_path(struct rdma_cm_id *id,
3153		     struct sa_path_rec *path_rec)
3154{
3155	struct rdma_id_private *id_priv;
3156	struct net_device *ndev;
3157	int ret;
3158
3159	id_priv = container_of(id, struct rdma_id_private, id);
3160	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3161			   RDMA_CM_ROUTE_RESOLVED))
3162		return -EINVAL;
3163
3164	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
3165				     GFP_KERNEL);
3166	if (!id->route.path_rec) {
3167		ret = -ENOMEM;
3168		goto err;
3169	}
3170
3171	if (rdma_protocol_roce(id->device, id->port_num)) {
3172		ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3173		if (!ndev) {
3174			ret = -ENODEV;
3175			goto err_free;
3176		}
3177		dev_put(ndev);
3178	}
3179
3180	id->route.num_pri_alt_paths = 1;
3181	return 0;
3182
3183err_free:
3184	kfree(id->route.path_rec);
3185	id->route.path_rec = NULL;
3186err:
3187	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
3188	return ret;
3189}
3190EXPORT_SYMBOL(rdma_set_ib_path);
3191
3192static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
3193{
3194	struct cma_work *work;
3195
3196	work = kzalloc(sizeof *work, GFP_KERNEL);
3197	if (!work)
3198		return -ENOMEM;
3199
3200	cma_init_resolve_route_work(work, id_priv);
3201	queue_work(cma_wq, &work->work);
3202	return 0;
3203}
3204
3205static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
3206{
3207	struct net_device *dev;
3208
3209	dev = vlan_dev_real_dev(vlan_ndev);
3210	if (dev->num_tc)
3211		return netdev_get_prio_tc_map(dev, prio);
3212
3213	return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
3214		VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3215}
3216
3217struct iboe_prio_tc_map {
3218	int input_prio;
3219	int output_tc;
3220	bool found;
3221};
3222
3223static int get_lower_vlan_dev_tc(struct net_device *dev,
3224				 struct netdev_nested_priv *priv)
3225{
3226	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
3227
3228	if (is_vlan_dev(dev))
3229		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
3230	else if (dev->num_tc)
3231		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
3232	else
3233		map->output_tc = 0;
3234	/* We are interested only in the first-level VLAN device, so always
3235	 * return 1 to stop iterating over next-level devices.
3236	 */
3237	map->found = true;
3238	return 1;
3239}
3240
3241static int iboe_tos_to_sl(struct net_device *ndev, int tos)
3242{
3243	struct iboe_prio_tc_map prio_tc_map = {};
3244	int prio = rt_tos2priority(tos);
3245	struct netdev_nested_priv priv;
3246
3247	/* If VLAN device, get it directly from the VLAN netdev */
3248	if (is_vlan_dev(ndev))
3249		return get_vlan_ndev_tc(ndev, prio);
3250
3251	prio_tc_map.input_prio = prio;
3252	priv.data = (void *)&prio_tc_map;
3253	rcu_read_lock();
3254	netdev_walk_all_lower_dev_rcu(ndev,
3255				      get_lower_vlan_dev_tc,
3256				      &priv);
3257	rcu_read_unlock();
3258	/* If a map is found from a lower device, use it; otherwise
3259	 * continue with the current netdevice to get the priority-to-TC map.
3260	 */
3261	if (prio_tc_map.found)
3262		return prio_tc_map.output_tc;
3263	else if (ndev->num_tc)
3264		return netdev_get_prio_tc_map(ndev, prio);
3265	else
3266		return 0;
3267}
3268
3269static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
3270{
3271	struct sockaddr_in6 *addr6;
3272	u16 dport, sport;
3273	u32 hash, fl;
3274
3275	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
3276	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
3277	if ((cma_family(id_priv) != AF_INET6) || !fl) {
3278		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
3279		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
3280		hash = (u32)sport * 31 + dport;
3281		fl = hash & IB_GRH_FLOWLABEL_MASK;
3282	}
3283
3284	return cpu_to_be32(fl);
3285}
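/*
 * Worked example (illustrative): with no IPv6 flow label supplied, a source
 * port of 4096 and a destination port of 4791 hash to
 * 4096 * 31 + 4791 = 131767 (0x202b7), which fits in the 20-bit flow-label
 * field, so the UDP-encapsulated RoCE traffic carries flow label 0x202b7
 * and gains per-connection entropy for ECMP.
 */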
3286
3287static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
3288{
3289	struct rdma_route *route = &id_priv->id.route;
3290	struct rdma_addr *addr = &route->addr;
3291	struct cma_work *work;
3292	int ret;
3293	struct net_device *ndev;
3294
3295	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
3296					rdma_start_port(id_priv->cma_dev->device)];
3297	u8 tos;
3298
3299	mutex_lock(&id_priv->qp_mutex);
3300	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
3301	mutex_unlock(&id_priv->qp_mutex);
3302
3303	work = kzalloc(sizeof *work, GFP_KERNEL);
3304	if (!work)
3305		return -ENOMEM;
3306
3307	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
3308	if (!route->path_rec) {
3309		ret = -ENOMEM;
3310		goto err1;
3311	}
3312
3313	route->num_pri_alt_paths = 1;
3314
3315	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3316	if (!ndev) {
3317		ret = -ENODEV;
3318		goto err2;
3319	}
3320
3321	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3322		    &route->path_rec->sgid);
3323	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
3324		    &route->path_rec->dgid);
3325
3326	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
3327		/* TODO: get the hoplimit from the inet/inet6 device */
3328		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
3329	else
3330		route->path_rec->hop_limit = 1;
3331	route->path_rec->reversible = 1;
3332	route->path_rec->pkey = cpu_to_be16(0xffff);
3333	route->path_rec->mtu_selector = IB_SA_EQ;
3334	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
3335	route->path_rec->traffic_class = tos;
3336	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
3337	route->path_rec->rate_selector = IB_SA_EQ;
3338	route->path_rec->rate = IB_RATE_PORT_CURRENT;
3339	dev_put(ndev);
3340	route->path_rec->packet_life_time_selector = IB_SA_EQ;
3341	/* In case ACK timeout is set, use this value to calculate
3342	 * PacketLifeTime.  As per IBTA 12.7.34,
3343	 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
3344	 * Assuming a negligible local ACK delay, we can use
3345	 * PacketLifeTime = local ACK timeout/2
3346	 * as a reasonable approximation for RoCE networks.
3347	 */
3348	mutex_lock(&id_priv->qp_mutex);
3349	if (id_priv->timeout_set && id_priv->timeout)
3350		route->path_rec->packet_life_time = id_priv->timeout - 1;
3351	else
3352		route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
3353	mutex_unlock(&id_priv->qp_mutex);
3354
3355	if (!route->path_rec->mtu) {
3356		ret = -EINVAL;
3357		goto err2;
3358	}
3359
3360	if (rdma_protocol_roce_udp_encap(id_priv->id.device,
3361					 id_priv->id.port_num))
3362		route->path_rec->flow_label =
3363			cma_get_roce_udp_flow_label(id_priv);
3364
3365	cma_init_resolve_route_work(work, id_priv);
3366	queue_work(cma_wq, &work->work);
3367
3368	return 0;
3369
3370err2:
3371	kfree(route->path_rec);
3372	route->path_rec = NULL;
3373	route->num_pri_alt_paths = 0;
3374err1:
3375	kfree(work);
3376	return ret;
3377}
3378
3379int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
3380{
3381	struct rdma_id_private *id_priv;
3382	int ret;
3383
3384	if (!timeout_ms)
3385		return -EINVAL;
3386
3387	id_priv = container_of(id, struct rdma_id_private, id);
3388	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
3389		return -EINVAL;
3390
3391	cma_id_get(id_priv);
3392	if (rdma_cap_ib_sa(id->device, id->port_num))
3393		ret = cma_resolve_ib_route(id_priv, timeout_ms);
3394	else if (rdma_protocol_roce(id->device, id->port_num)) {
3395		ret = cma_resolve_iboe_route(id_priv);
3396		if (!ret)
3397			cma_add_id_to_tree(id_priv);
3398	}
3399	else if (rdma_protocol_iwarp(id->device, id->port_num))
3400		ret = cma_resolve_iw_route(id_priv);
3401	else
3402		ret = -ENOSYS;
3403
3404	if (ret)
3405		goto err;
3406
3407	return 0;
3408err:
3409	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
3410	cma_id_put(id_priv);
3411	return ret;
3412}
3413EXPORT_SYMBOL(rdma_resolve_route);
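/*
 * Usage sketch (assumed consumer code, not part of this file): route
 * resolution is the second step of the active-side sequence and is normally
 * driven from the event handler, after RDMA_CM_EVENT_ADDR_RESOLVED:
 *
 *	static int example_handler(struct rdma_cm_id *id,
 *				   struct rdma_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			// non-zero return destroys the id on failure
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ROUTE_RESOLVED:
 *			// create the QP, then rdma_connect(id, &conn_param)
 *			return 0;
 *		default:
 *			return 0;
 *		}
 *	}
 */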
3414
3415static void cma_set_loopback(struct sockaddr *addr)
3416{
3417	switch (addr->sa_family) {
3418	case AF_INET:
3419		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
3420		break;
3421	case AF_INET6:
3422		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
3423			      0, 0, 0, htonl(1));
3424		break;
3425	default:
3426		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
3427			    0, 0, 0, htonl(1));
3428		break;
3429	}
3430}
3431
3432static int cma_bind_loopback(struct rdma_id_private *id_priv)
3433{
3434	struct cma_device *cma_dev, *cur_dev;
3435	union ib_gid gid;
3436	enum ib_port_state port_state;
3437	unsigned int p;
3438	u16 pkey;
3439	int ret;
3440
3441	cma_dev = NULL;
3442	mutex_lock(&lock);
3443	list_for_each_entry(cur_dev, &dev_list, list) {
3444		if (cma_family(id_priv) == AF_IB &&
3445		    !rdma_cap_ib_cm(cur_dev->device, 1))
3446			continue;
3447
3448		if (!cma_dev)
3449			cma_dev = cur_dev;
3450
3451		rdma_for_each_port (cur_dev->device, p) {
3452			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
3453			    port_state == IB_PORT_ACTIVE) {
3454				cma_dev = cur_dev;
3455				goto port_found;
3456			}
3457		}
3458	}
3459
3460	if (!cma_dev) {
3461		ret = -ENODEV;
3462		goto out;
3463	}
3464
3465	p = 1;
3466
3467port_found:
3468	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
3469	if (ret)
3470		goto out;
3471
3472	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
3473	if (ret)
3474		goto out;
3475
3476	id_priv->id.route.addr.dev_addr.dev_type =
3477		(rdma_protocol_ib(cma_dev->device, p)) ?
3478		ARPHRD_INFINIBAND : ARPHRD_ETHER;
3479
3480	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3481	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
3482	id_priv->id.port_num = p;
3483	cma_attach_to_dev(id_priv, cma_dev);
3484	rdma_restrack_add(&id_priv->res);
3485	cma_set_loopback(cma_src_addr(id_priv));
3486out:
3487	mutex_unlock(&lock);
3488	return ret;
3489}
3490
3491static void addr_handler(int status, struct sockaddr *src_addr,
3492			 struct rdma_dev_addr *dev_addr, void *context)
3493{
3494	struct rdma_id_private *id_priv = context;
3495	struct rdma_cm_event event = {};
3496	struct sockaddr *addr;
3497	struct sockaddr_storage old_addr;
3498
3499	mutex_lock(&id_priv->handler_mutex);
3500	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
3501			   RDMA_CM_ADDR_RESOLVED))
3502		goto out;
3503
3504	/*
3505	 * Store the previous src address, so that if we fail to acquire a
3506	 * matching rdma device, the old address can be restored, which helps
3507	 * to cancel the cma listen operation correctly.
3508	 */
3509	addr = cma_src_addr(id_priv);
3510	memcpy(&old_addr, addr, rdma_addr_size(addr));
3511	memcpy(addr, src_addr, rdma_addr_size(src_addr));
3512	if (!status && !id_priv->cma_dev) {
3513		status = cma_acquire_dev_by_src_ip(id_priv);
3514		if (status)
3515			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
3516					     status);
3517		rdma_restrack_add(&id_priv->res);
3518	} else if (status) {
3519		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
3520	}
3521
3522	if (status) {
3523		memcpy(addr, &old_addr,
3524		       rdma_addr_size((struct sockaddr *)&old_addr));
3525		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3526				   RDMA_CM_ADDR_BOUND))
3527			goto out;
3528		event.event = RDMA_CM_EVENT_ADDR_ERROR;
3529		event.status = status;
3530	} else
3531		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3532
3533	if (cma_cm_event_handler(id_priv, &event)) {
3534		destroy_id_handler_unlock(id_priv);
3535		return;
3536	}
3537out:
3538	mutex_unlock(&id_priv->handler_mutex);
3539}
3540
3541static int cma_resolve_loopback(struct rdma_id_private *id_priv)
3542{
3543	struct cma_work *work;
3544	union ib_gid gid;
3545	int ret;
3546
3547	work = kzalloc(sizeof *work, GFP_KERNEL);
3548	if (!work)
3549		return -ENOMEM;
3550
3551	if (!id_priv->cma_dev) {
3552		ret = cma_bind_loopback(id_priv);
3553		if (ret)
3554			goto err;
3555	}
3556
3557	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3558	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
3559
3560	enqueue_resolve_addr_work(work, id_priv);
3561	return 0;
3562err:
3563	kfree(work);
3564	return ret;
3565}
3566
3567static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
3568{
3569	struct cma_work *work;
3570	int ret;
3571
3572	work = kzalloc(sizeof *work, GFP_KERNEL);
3573	if (!work)
3574		return -ENOMEM;
3575
3576	if (!id_priv->cma_dev) {
3577		ret = cma_resolve_ib_dev(id_priv);
3578		if (ret)
3579			goto err;
3580	}
3581
3582	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
3583		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
3584
3585	enqueue_resolve_addr_work(work, id_priv);
3586	return 0;
3587err:
3588	kfree(work);
3589	return ret;
3590}
3591
3592int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3593{
3594	struct rdma_id_private *id_priv;
3595	unsigned long flags;
3596	int ret;
3597
3598	id_priv = container_of(id, struct rdma_id_private, id);
3599	spin_lock_irqsave(&id_priv->lock, flags);
3600	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
3601	    id_priv->state == RDMA_CM_IDLE) {
3602		id_priv->reuseaddr = reuse;
3603		ret = 0;
3604	} else {
3605		ret = -EINVAL;
3606	}
3607	spin_unlock_irqrestore(&id_priv->lock, flags);
3608	return ret;
3609}
3610EXPORT_SYMBOL(rdma_set_reuseaddr);
3611
3612int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
3613{
3614	struct rdma_id_private *id_priv;
3615	unsigned long flags;
3616	int ret;
3617
3618	id_priv = container_of(id, struct rdma_id_private, id);
3619	spin_lock_irqsave(&id_priv->lock, flags);
3620	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
3621		id_priv->options |= (1 << CMA_OPTION_AFONLY);
3622		id_priv->afonly = afonly;
3623		ret = 0;
3624	} else {
3625		ret = -EINVAL;
3626	}
3627	spin_unlock_irqrestore(&id_priv->lock, flags);
3628	return ret;
3629}
3630EXPORT_SYMBOL(rdma_set_afonly);
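/*
 * Usage sketch (assumed consumer code): both knobs are typically set on a
 * still-unbound id, right before rdma_bind_addr():
 *
 *	rdma_set_afonly(id, 1);		// accept only the bound address family
 *	rdma_set_reuseaddr(id, 1);
 *	ret = rdma_bind_addr(id, (struct sockaddr *)&sin6);
 *	if (!ret)
 *		ret = rdma_listen(id, 16);
 */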
3631
3632static void cma_bind_port(struct rdma_bind_list *bind_list,
3633			  struct rdma_id_private *id_priv)
3634{
3635	struct sockaddr *addr;
3636	struct sockaddr_ib *sib;
3637	u64 sid, mask;
3638	__be16 port;
3639
3640	lockdep_assert_held(&lock);
3641
3642	addr = cma_src_addr(id_priv);
3643	port = htons(bind_list->port);
3644
3645	switch (addr->sa_family) {
3646	case AF_INET:
3647		((struct sockaddr_in *) addr)->sin_port = port;
3648		break;
3649	case AF_INET6:
3650		((struct sockaddr_in6 *) addr)->sin6_port = port;
3651		break;
3652	case AF_IB:
3653		sib = (struct sockaddr_ib *) addr;
3654		sid = be64_to_cpu(sib->sib_sid);
3655		mask = be64_to_cpu(sib->sib_sid_mask);
3656		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
3657		sib->sib_sid_mask = cpu_to_be64(~0ULL);
3658		break;
3659	}
3660	id_priv->bind_list = bind_list;
3661	hlist_add_head(&id_priv->node, &bind_list->owners);
3662}
3663
3664static int cma_alloc_port(enum rdma_ucm_port_space ps,
3665			  struct rdma_id_private *id_priv, unsigned short snum)
3666{
3667	struct rdma_bind_list *bind_list;
3668	int ret;
3669
3670	lockdep_assert_held(&lock);
3671
3672	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
3673	if (!bind_list)
3674		return -ENOMEM;
3675
3676	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3677			   snum);
3678	if (ret < 0)
3679		goto err;
3680
3681	bind_list->ps = ps;
3682	bind_list->port = snum;
3683	cma_bind_port(bind_list, id_priv);
3684	return 0;
3685err:
3686	kfree(bind_list);
3687	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
3688}
3689
3690static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3691			      struct rdma_id_private *id_priv)
3692{
3693	struct rdma_id_private *cur_id;
3694	struct sockaddr  *daddr = cma_dst_addr(id_priv);
3695	struct sockaddr  *saddr = cma_src_addr(id_priv);
3696	__be16 dport = cma_port(daddr);
3697
3698	lockdep_assert_held(&lock);
3699
3700	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3701		struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
3702		struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
3703		__be16 cur_dport = cma_port(cur_daddr);
3704
3705		if (id_priv == cur_id)
3706			continue;
3707
3708		/* different dest port -> unique */
3709		if (!cma_any_port(daddr) &&
3710		    !cma_any_port(cur_daddr) &&
3711		    (dport != cur_dport))
3712			continue;
3713
3714		/* different src address -> unique */
3715		if (!cma_any_addr(saddr) &&
3716		    !cma_any_addr(cur_saddr) &&
3717		    cma_addr_cmp(saddr, cur_saddr))
3718			continue;
3719
3720		/* different dst address -> unique */
3721		if (!cma_any_addr(daddr) &&
3722		    !cma_any_addr(cur_daddr) &&
3723		    cma_addr_cmp(daddr, cur_daddr))
3724			continue;
3725
3726		return -EADDRNOTAVAIL;
3727	}
3728	return 0;
3729}
3730
3731static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
3732			      struct rdma_id_private *id_priv)
3733{
3734	static unsigned int last_used_port;
3735	int low, high, remaining;
3736	unsigned int rover;
3737	struct net *net = id_priv->id.route.addr.dev_addr.net;
3738
3739	lockdep_assert_held(&lock);
3740
3741	inet_get_local_port_range(net, &low, &high);
3742	remaining = (high - low) + 1;
3743	rover = get_random_u32_inclusive(low, remaining + low - 1);
3744retry:
3745	if (last_used_port != rover) {
3746		struct rdma_bind_list *bind_list;
3747		int ret;
3748
3749		bind_list = cma_ps_find(net, ps, (unsigned short)rover);
3750
3751		if (!bind_list) {
3752			ret = cma_alloc_port(ps, id_priv, rover);
3753		} else {
3754			ret = cma_port_is_unique(bind_list, id_priv);
3755			if (!ret)
3756				cma_bind_port(bind_list, id_priv);
3757		}
3758		/*
3759		 * Remember the previously used port number in order to avoid
3760		 * re-using the same port immediately after it is closed.
3761		 */
3762		if (!ret)
3763			last_used_port = rover;
3764		if (ret != -EADDRNOTAVAIL)
3765			return ret;
3766	}
3767	if (--remaining) {
3768		rover++;
3769		if ((rover < low) || (rover > high))
3770			rover = low;
3771		goto retry;
3772	}
3773	return -EADDRNOTAVAIL;
3774}
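
/*
 * Editorial note, illustrative only: the search above starts at a random
 * point in the local port range and probes linearly with wrap-around.  With
 * the common default range of 32768-60999, a rover of e.g. 45000 tries
 * 45000, 45001, ..., 60999, then wraps to 32768, until it finds a port with
 * no conflicting binding or every port has been tried, in which case
 * -EADDRNOTAVAIL is returned.
 */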
3775
3776/*
3777 * Check that the requested port is available.  This is called when trying to
3778 * bind to a specific port, or when trying to listen on a bound port.  In
3779 * the latter case, the provided id_priv may already be on the bind_list, but
3780 * we still need to check that it's okay to start listening.
3781 */
3782static int cma_check_port(struct rdma_bind_list *bind_list,
3783			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
3784{
3785	struct rdma_id_private *cur_id;
3786	struct sockaddr *addr, *cur_addr;
3787
3788	lockdep_assert_held(&lock);
3789
3790	addr = cma_src_addr(id_priv);
3791	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3792		if (id_priv == cur_id)
3793			continue;
3794
3795		if (reuseaddr && cur_id->reuseaddr)
3796			continue;
3797
3798		cur_addr = cma_src_addr(cur_id);
3799		if (id_priv->afonly && cur_id->afonly &&
3800		    (addr->sa_family != cur_addr->sa_family))
3801			continue;
3802
3803		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
3804			return -EADDRNOTAVAIL;
3805
3806		if (!cma_addr_cmp(addr, cur_addr))
3807			return -EADDRINUSE;
3808	}
3809	return 0;
3810}
3811
3812static int cma_use_port(enum rdma_ucm_port_space ps,
3813			struct rdma_id_private *id_priv)
3814{
3815	struct rdma_bind_list *bind_list;
3816	unsigned short snum;
3817	int ret;
3818
3819	lockdep_assert_held(&lock);
3820
3821	snum = ntohs(cma_port(cma_src_addr(id_priv)));
3822	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
3823		return -EACCES;
3824
3825	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3826	if (!bind_list) {
3827		ret = cma_alloc_port(ps, id_priv, snum);
3828	} else {
3829		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
3830		if (!ret)
3831			cma_bind_port(bind_list, id_priv);
3832	}
3833	return ret;
3834}
3835
3836static enum rdma_ucm_port_space
3837cma_select_inet_ps(struct rdma_id_private *id_priv)
3838{
3839	switch (id_priv->id.ps) {
3840	case RDMA_PS_TCP:
3841	case RDMA_PS_UDP:
3842	case RDMA_PS_IPOIB:
3843	case RDMA_PS_IB:
3844		return id_priv->id.ps;
3845	default:
3846
3847		return 0;
3848	}
3849}
3850
3851static enum rdma_ucm_port_space
3852cma_select_ib_ps(struct rdma_id_private *id_priv)
3853{
3854	enum rdma_ucm_port_space ps = 0;
3855	struct sockaddr_ib *sib;
3856	u64 sid_ps, mask, sid;
3857
3858	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
3859	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
3860	sid = be64_to_cpu(sib->sib_sid) & mask;
3861
3862	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
3863		sid_ps = RDMA_IB_IP_PS_IB;
3864		ps = RDMA_PS_IB;
3865	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
3866		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
3867		sid_ps = RDMA_IB_IP_PS_TCP;
3868		ps = RDMA_PS_TCP;
3869	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
3870		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
3871		sid_ps = RDMA_IB_IP_PS_UDP;
3872		ps = RDMA_PS_UDP;
3873	}
3874
3875	if (ps) {
3876		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
3877		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
3878						be64_to_cpu(sib->sib_sid_mask));
3879	}
3880	return ps;
3881}
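
/*
 * Editorial note, illustrative only: for AF_IB the port space and the port
 * are both encoded in the service ID.  The bits covered by
 * RDMA_IB_IP_PS_MASK select the port space (IB, TCP or UDP, as matched
 * above), while the low 16 bits carry the port number, so after this
 * function and cma_bind_port() the SID is effectively:
 *
 *	sib_sid = <port-space prefix> | <16-bit port>
 */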
3882
3883static int cma_get_port(struct rdma_id_private *id_priv)
3884{
3885	enum rdma_ucm_port_space ps;
3886	int ret;
3887
3888	if (cma_family(id_priv) != AF_IB)
3889		ps = cma_select_inet_ps(id_priv);
3890	else
3891		ps = cma_select_ib_ps(id_priv);
3892	if (!ps)
3893		return -EPROTONOSUPPORT;
3894
3895	mutex_lock(&lock);
3896	if (cma_any_port(cma_src_addr(id_priv)))
3897		ret = cma_alloc_any_port(ps, id_priv);
3898	else
3899		ret = cma_use_port(ps, id_priv);
3900	mutex_unlock(&lock);
3901
3902	return ret;
3903}
3904
3905static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
3906			       struct sockaddr *addr)
3907{
3908#if IS_ENABLED(CONFIG_IPV6)
3909	struct sockaddr_in6 *sin6;
3910
3911	if (addr->sa_family != AF_INET6)
3912		return 0;
3913
3914	sin6 = (struct sockaddr_in6 *) addr;
3915
3916	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
3917		return 0;
3918
3919	if (!sin6->sin6_scope_id)
3920			return -EINVAL;
3921
3922	dev_addr->bound_dev_if = sin6->sin6_scope_id;
3923#endif
3924	return 0;
3925}
3926
3927int rdma_listen(struct rdma_cm_id *id, int backlog)
3928{
3929	struct rdma_id_private *id_priv =
3930		container_of(id, struct rdma_id_private, id);
3931	int ret;
3932
3933	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
3934		struct sockaddr_in any_in = {
3935			.sin_family = AF_INET,
3936			.sin_addr.s_addr = htonl(INADDR_ANY),
3937		};
3938
3939		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
3940		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
3941		if (ret)
3942			return ret;
3943		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
3944					   RDMA_CM_LISTEN)))
3945			return -EINVAL;
3946	}
3947
3948	/*
3949	 * Once the ID reaches RDMA_CM_LISTEN it may no longer be marked
3950	 * reusable and has to be unique in the bind list.
3951	 */
3952	if (id_priv->reuseaddr) {
3953		mutex_lock(&lock);
3954		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
3955		if (!ret)
3956			id_priv->reuseaddr = 0;
3957		mutex_unlock(&lock);
3958		if (ret)
3959			goto err;
3960	}
3961
3962	id_priv->backlog = backlog;
3963	if (id_priv->cma_dev) {
3964		if (rdma_cap_ib_cm(id->device, 1)) {
3965			ret = cma_ib_listen(id_priv);
3966			if (ret)
3967				goto err;
3968		} else if (rdma_cap_iw_cm(id->device, 1)) {
3969			ret = cma_iw_listen(id_priv, backlog);
3970			if (ret)
3971				goto err;
3972		} else {
3973			ret = -ENOSYS;
3974			goto err;
3975		}
3976	} else {
3977		ret = cma_listen_on_all(id_priv);
3978		if (ret)
3979			goto err;
3980	}
3981
3982	return 0;
3983err:
3984	id_priv->backlog = 0;
3985	/*
3986	 * None of the failure paths that lead here allow the req_handlers
3987	 * to have run.
3988	 */
3989	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
3990	return ret;
3991}
3992EXPORT_SYMBOL(rdma_listen);
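
/*
 * Editorial sketch, not part of this module: a minimal passive-side setup a
 * kernel ULP might use.  rdma_create_id()/rdma_destroy_id() are assumed from
 * <rdma/rdma_cm.h>; the handler name, context and port number below are
 * hypothetical.
 *
 *	struct sockaddr_in sin = {
 *		.sin_family	 = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *		.sin_port	 = htons(7471),
 *	};
 *	struct rdma_cm_id *listen_id;
 *	int ret;
 *
 *	listen_id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
 *				   RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(listen_id))
 *		return PTR_ERR(listen_id);
 *
 *	ret = rdma_bind_addr(listen_id, (struct sockaddr *)&sin);
 *	if (!ret)
 *		ret = rdma_listen(listen_id, 128);
 *	if (ret)
 *		rdma_destroy_id(listen_id);
 *
 * Incoming requests then arrive at my_cm_handler() as
 * RDMA_CM_EVENT_CONNECT_REQUEST events carrying a new child rdma_cm_id.
 */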
3993
3994static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
3995			      struct sockaddr *addr, const struct sockaddr *daddr)
3996{
3997	struct sockaddr *id_daddr;
3998	int ret;
3999
4000	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
4001	    addr->sa_family != AF_IB)
4002		return -EAFNOSUPPORT;
4003
4004	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
4005		return -EINVAL;
4006
4007	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
4008	if (ret)
4009		goto err1;
4010
4011	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
4012	if (!cma_any_addr(addr)) {
4013		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
4014		if (ret)
4015			goto err1;
4016
4017		ret = cma_acquire_dev_by_src_ip(id_priv);
4018		if (ret)
4019			goto err1;
4020	}
4021
4022	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
4023		if (addr->sa_family == AF_INET)
4024			id_priv->afonly = 1;
4025#if IS_ENABLED(CONFIG_IPV6)
4026		else if (addr->sa_family == AF_INET6) {
4027			struct net *net = id_priv->id.route.addr.dev_addr.net;
4028
4029			id_priv->afonly = net->ipv6.sysctl.bindv6only;
4030		}
4031#endif
4032	}
4033	id_daddr = cma_dst_addr(id_priv);
4034	if (daddr != id_daddr)
4035		memcpy(id_daddr, daddr, rdma_addr_size(addr));
4036	id_daddr->sa_family = addr->sa_family;
4037
4038	ret = cma_get_port(id_priv);
4039	if (ret)
4040		goto err2;
4041
4042	if (!cma_any_addr(addr))
4043		rdma_restrack_add(&id_priv->res);
4044	return 0;
4045err2:
4046	if (id_priv->cma_dev)
4047		cma_release_dev(id_priv);
4048err1:
4049	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
4050	return ret;
4051}
4052
4053static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4054			 const struct sockaddr *dst_addr)
4055{
4056	struct rdma_id_private *id_priv =
4057		container_of(id, struct rdma_id_private, id);
4058	struct sockaddr_storage zero_sock = {};
4059
4060	if (src_addr && src_addr->sa_family)
4061		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
4062
4063	/*
4064	 * When src_addr is not specified, automatically supply a wildcard (any) address
4065	 */
4066	zero_sock.ss_family = dst_addr->sa_family;
4067	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
4068		struct sockaddr_in6 *src_addr6 =
4069			(struct sockaddr_in6 *)&zero_sock;
4070		struct sockaddr_in6 *dst_addr6 =
4071			(struct sockaddr_in6 *)dst_addr;
4072
4073		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
4074		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
4075			id->route.addr.dev_addr.bound_dev_if =
4076				dst_addr6->sin6_scope_id;
4077	} else if (dst_addr->sa_family == AF_IB) {
4078		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
4079			((struct sockaddr_ib *)dst_addr)->sib_pkey;
4080	}
4081	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
4082}
4083
4084/*
4085 * If required, resolve the source address for bind and leave the id_priv in
4086 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine what calls
4087 * the ULP has already made: a previously bound ID will not be re-bound, and
4088 * src_addr is ignored.
4089 */
4090static int resolve_prepare_src(struct rdma_id_private *id_priv,
4091			       struct sockaddr *src_addr,
4092			       const struct sockaddr *dst_addr)
4093{
4094	int ret;
4095
4096	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
4097		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
4098		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
4099		if (ret)
4100			return ret;
4101		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
4102					   RDMA_CM_ADDR_QUERY)))
4103			return -EINVAL;
4104
4105	} else {
4106		memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
4107	}
4108
4109	if (cma_family(id_priv) != dst_addr->sa_family) {
4110		ret = -EINVAL;
4111		goto err_state;
4112	}
4113	return 0;
4114
4115err_state:
4116	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
4117	return ret;
4118}
4119
4120int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4121		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
4122{
4123	struct rdma_id_private *id_priv =
4124		container_of(id, struct rdma_id_private, id);
4125	int ret;
4126
4127	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
4128	if (ret)
4129		return ret;
4130
4131	if (cma_any_addr(dst_addr)) {
4132		ret = cma_resolve_loopback(id_priv);
4133	} else {
4134		if (dst_addr->sa_family == AF_IB) {
4135			ret = cma_resolve_ib_addr(id_priv);
4136		} else {
4137			/*
4138			 * The FSM can return to RDMA_CM_ADDR_BOUND after
4139			 * rdma_resolve_ip() is called, e.g. through the error
4140			 * path in addr_handler(). If this happens the existing
4141			 * request must be canceled before issuing a new one.
4142			 * Since canceling a request is a bit slow and this
4143			 * oddball path is rare, just track whether a request
4144			 * has ever been issued. The flag stays set permanently,
4145			 * since this is the only place a cancel is needed and
4146			 * it sits immediately before rdma_resolve_ip().
4147			 */
4148			if (id_priv->used_resolve_ip)
4149				rdma_addr_cancel(&id->route.addr.dev_addr);
4150			else
4151				id_priv->used_resolve_ip = 1;
4152			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
4153					      &id->route.addr.dev_addr,
4154					      timeout_ms, addr_handler,
4155					      false, id_priv);
4156		}
4157	}
4158	if (ret)
4159		goto err;
4160
4161	return 0;
4162err:
4163	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
4164	return ret;
4165}
4166EXPORT_SYMBOL(rdma_resolve_addr);
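
/*
 * Editorial sketch, not part of this module: typical active-side flow.  The
 * ULP resolves the address, then continues from its event handler; the
 * handler name, destination, timeout and helper functions below are
 * hypothetical.
 *
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst_sin, 2000);
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *ev)
 *	{
 *		switch (ev->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ROUTE_RESOLVED:
 *			return my_create_qp_and_connect(id);
 *		case RDMA_CM_EVENT_ADDR_ERROR:
 *		case RDMA_CM_EVENT_ROUTE_ERROR:
 *			return my_teardown(id);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 * Returning a non-zero value from the handler tells the rdma_cm core to
 * destroy the id, so my_teardown() must not call rdma_destroy_id() itself.
 */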
4167
4168int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
4169{
4170	struct rdma_id_private *id_priv =
4171		container_of(id, struct rdma_id_private, id);
4172
4173	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
4174}
4175EXPORT_SYMBOL(rdma_bind_addr);
4176
4177static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
4178{
4179	struct cma_hdr *cma_hdr;
4180
4181	cma_hdr = hdr;
4182	cma_hdr->cma_version = CMA_VERSION;
4183	if (cma_family(id_priv) == AF_INET) {
4184		struct sockaddr_in *src4, *dst4;
4185
4186		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
4187		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
4188
4189		cma_set_ip_ver(cma_hdr, 4);
4190		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
4191		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
4192		cma_hdr->port = src4->sin_port;
4193	} else if (cma_family(id_priv) == AF_INET6) {
4194		struct sockaddr_in6 *src6, *dst6;
4195
4196		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
4197		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
4198
4199		cma_set_ip_ver(cma_hdr, 6);
4200		cma_hdr->src_addr.ip6 = src6->sin6_addr;
4201		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
4202		cma_hdr->port = src6->sin6_port;
4203	}
4204	return 0;
4205}
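
/*
 * Editorial note, illustrative only: for the IPv4/IPv6 port spaces this
 * header occupies the leading bytes that cma_user_data_offset() reserves, so
 * the CM REQ/SIDR_REQ private data is laid out roughly as:
 *
 *	[ struct cma_hdr: version, IP version, src port, src/dst addresses ]
 *	[ ULP private data, conn_param->private_data_len bytes             ]
 *
 * The passive side parses the header back out of the request to recover the
 * addressing information before handing the remaining bytes to the listener.
 */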
4206
4207static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
4208				const struct ib_cm_event *ib_event)
4209{
4210	struct rdma_id_private *id_priv = cm_id->context;
4211	struct rdma_cm_event event = {};
4212	const struct ib_cm_sidr_rep_event_param *rep =
4213				&ib_event->param.sidr_rep_rcvd;
4214	int ret;
4215
4216	mutex_lock(&id_priv->handler_mutex);
4217	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4218		goto out;
4219
4220	switch (ib_event->event) {
4221	case IB_CM_SIDR_REQ_ERROR:
4222		event.event = RDMA_CM_EVENT_UNREACHABLE;
4223		event.status = -ETIMEDOUT;
4224		break;
4225	case IB_CM_SIDR_REP_RECEIVED:
4226		event.param.ud.private_data = ib_event->private_data;
4227		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
4228		if (rep->status != IB_SIDR_SUCCESS) {
4229			event.event = RDMA_CM_EVENT_UNREACHABLE;
4230			event.status = ib_event->param.sidr_rep_rcvd.status;
4231			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
4232					     event.status);
4233			break;
4234		}
4235		ret = cma_set_qkey(id_priv, rep->qkey);
4236		if (ret) {
4237			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
4238			event.event = RDMA_CM_EVENT_ADDR_ERROR;
4239			event.status = ret;
4240			break;
4241		}
4242		ib_init_ah_attr_from_path(id_priv->id.device,
4243					  id_priv->id.port_num,
4244					  id_priv->id.route.path_rec,
4245					  &event.param.ud.ah_attr,
4246					  rep->sgid_attr);
4247		event.param.ud.qp_num = rep->qpn;
4248		event.param.ud.qkey = rep->qkey;
4249		event.event = RDMA_CM_EVENT_ESTABLISHED;
4250		event.status = 0;
4251		break;
4252	default:
4253		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
4254		       ib_event->event);
4255		goto out;
4256	}
4257
4258	ret = cma_cm_event_handler(id_priv, &event);
4259
4260	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4261	if (ret) {
4262		/* Destroy the CM ID by returning a non-zero value. */
4263		id_priv->cm_id.ib = NULL;
4264		destroy_id_handler_unlock(id_priv);
4265		return ret;
4266	}
4267out:
4268	mutex_unlock(&id_priv->handler_mutex);
4269	return 0;
4270}
4271
4272static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
4273			      struct rdma_conn_param *conn_param)
4274{
4275	struct ib_cm_sidr_req_param req;
4276	struct ib_cm_id	*id;
4277	void *private_data;
4278	u8 offset;
4279	int ret;
4280
4281	memset(&req, 0, sizeof req);
4282	offset = cma_user_data_offset(id_priv);
4283	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
4284		return -EINVAL;
4285
4286	if (req.private_data_len) {
4287		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4288		if (!private_data)
4289			return -ENOMEM;
4290	} else {
4291		private_data = NULL;
4292	}
4293
4294	if (conn_param->private_data && conn_param->private_data_len)
4295		memcpy(private_data + offset, conn_param->private_data,
4296		       conn_param->private_data_len);
4297
4298	if (private_data) {
4299		ret = cma_format_hdr(private_data, id_priv);
4300		if (ret)
4301			goto out;
4302		req.private_data = private_data;
4303	}
4304
4305	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
4306			     id_priv);
4307	if (IS_ERR(id)) {
4308		ret = PTR_ERR(id);
4309		goto out;
4310	}
4311	id_priv->cm_id.ib = id;
4312
4313	req.path = id_priv->id.route.path_rec;
4314	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4315	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4316	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
4317	req.max_cm_retries = CMA_MAX_CM_RETRIES;
4318
4319	trace_cm_send_sidr_req(id_priv);
4320	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
4321	if (ret) {
4322		ib_destroy_cm_id(id_priv->cm_id.ib);
4323		id_priv->cm_id.ib = NULL;
4324	}
4325out:
4326	kfree(private_data);
4327	return ret;
4328}
4329
4330static int cma_connect_ib(struct rdma_id_private *id_priv,
4331			  struct rdma_conn_param *conn_param)
4332{
4333	struct ib_cm_req_param req;
4334	struct rdma_route *route;
4335	void *private_data;
4336	struct ib_cm_id	*id;
4337	u8 offset;
4338	int ret;
4339
4340	memset(&req, 0, sizeof req);
4341	offset = cma_user_data_offset(id_priv);
4342	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
4343		return -EINVAL;
4344
4345	if (req.private_data_len) {
4346		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4347		if (!private_data)
4348			return -ENOMEM;
4349	} else {
4350		private_data = NULL;
4351	}
4352
4353	if (conn_param->private_data && conn_param->private_data_len)
4354		memcpy(private_data + offset, conn_param->private_data,
4355		       conn_param->private_data_len);
4356
4357	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
4358	if (IS_ERR(id)) {
4359		ret = PTR_ERR(id);
4360		goto out;
4361	}
4362	id_priv->cm_id.ib = id;
4363
4364	route = &id_priv->id.route;
4365	if (private_data) {
4366		ret = cma_format_hdr(private_data, id_priv);
4367		if (ret)
4368			goto out;
4369		req.private_data = private_data;
4370	}
4371
4372	req.primary_path = &route->path_rec[0];
4373	req.primary_path_inbound = route->path_rec_inbound;
4374	req.primary_path_outbound = route->path_rec_outbound;
4375	if (route->num_pri_alt_paths == 2)
4376		req.alternate_path = &route->path_rec[1];
4377
4378	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4379	/* Alternate path SGID attribute currently unsupported */
4380	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4381	req.qp_num = id_priv->qp_num;
4382	req.qp_type = id_priv->id.qp_type;
4383	req.starting_psn = id_priv->seq_num;
4384	req.responder_resources = conn_param->responder_resources;
4385	req.initiator_depth = conn_param->initiator_depth;
4386	req.flow_control = conn_param->flow_control;
4387	req.retry_count = min_t(u8, 7, conn_param->retry_count);
4388	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4389	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4390	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4391	req.max_cm_retries = CMA_MAX_CM_RETRIES;
4392	req.srq = id_priv->srq ? 1 : 0;
4393	req.ece.vendor_id = id_priv->ece.vendor_id;
4394	req.ece.attr_mod = id_priv->ece.attr_mod;
4395
4396	trace_cm_send_req(id_priv);
4397	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
4398out:
4399	if (ret && !IS_ERR(id)) {
4400		ib_destroy_cm_id(id);
4401		id_priv->cm_id.ib = NULL;
4402	}
4403
4404	kfree(private_data);
4405	return ret;
4406}
4407
4408static int cma_connect_iw(struct rdma_id_private *id_priv,
4409			  struct rdma_conn_param *conn_param)
4410{
4411	struct iw_cm_id *cm_id;
4412	int ret;
4413	struct iw_cm_conn_param iw_param;
4414
4415	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
4416	if (IS_ERR(cm_id))
4417		return PTR_ERR(cm_id);
4418
4419	mutex_lock(&id_priv->qp_mutex);
4420	cm_id->tos = id_priv->tos;
4421	cm_id->tos_set = id_priv->tos_set;
4422	mutex_unlock(&id_priv->qp_mutex);
4423
4424	id_priv->cm_id.iw = cm_id;
4425
4426	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
4427	       rdma_addr_size(cma_src_addr(id_priv)));
4428	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
4429	       rdma_addr_size(cma_dst_addr(id_priv)));
4430
4431	ret = cma_modify_qp_rtr(id_priv, conn_param);
4432	if (ret)
4433		goto out;
4434
4435	if (conn_param) {
4436		iw_param.ord = conn_param->initiator_depth;
4437		iw_param.ird = conn_param->responder_resources;
4438		iw_param.private_data = conn_param->private_data;
4439		iw_param.private_data_len = conn_param->private_data_len;
4440		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
4441	} else {
4442		memset(&iw_param, 0, sizeof iw_param);
4443		iw_param.qpn = id_priv->qp_num;
4444	}
4445	ret = iw_cm_connect(cm_id, &iw_param);
4446out:
4447	if (ret) {
4448		iw_destroy_cm_id(cm_id);
4449		id_priv->cm_id.iw = NULL;
4450	}
4451	return ret;
4452}
4453
4454/**
4455 * rdma_connect_locked - Initiate an active connection request.
4456 * @id: Connection identifier to connect.
4457 * @conn_param: Connection information used for connected QPs.
4458 *
4459 * Same as rdma_connect() but can only be called from the
4460 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
4461 */
4462int rdma_connect_locked(struct rdma_cm_id *id,
4463			struct rdma_conn_param *conn_param)
4464{
4465	struct rdma_id_private *id_priv =
4466		container_of(id, struct rdma_id_private, id);
4467	int ret;
4468
4469	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
4470		return -EINVAL;
4471
4472	if (!id->qp) {
4473		id_priv->qp_num = conn_param->qp_num;
4474		id_priv->srq = conn_param->srq;
4475	}
4476
4477	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4478		if (id->qp_type == IB_QPT_UD)
4479			ret = cma_resolve_ib_udp(id_priv, conn_param);
4480		else
4481			ret = cma_connect_ib(id_priv, conn_param);
4482	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4483		ret = cma_connect_iw(id_priv, conn_param);
4484	} else {
4485		ret = -ENOSYS;
4486	}
4487	if (ret)
4488		goto err_state;
4489	return 0;
4490err_state:
4491	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
4492	return ret;
4493}
4494EXPORT_SYMBOL(rdma_connect_locked);
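
/*
 * Editorial sketch, not part of this module: initiating the connection from
 * the RDMA_CM_EVENT_ROUTE_RESOLVED callback, where the handler_mutex is
 * already held and rdma_connect_locked() must therefore be used instead of
 * rdma_connect().  It assumes a QP has already been created and associated
 * with the id (for instance via rdma_create_qp(), defined elsewhere); the
 * parameter values are arbitrary examples.
 *
 *	case RDMA_CM_EVENT_ROUTE_RESOLVED: {
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth     = 1,
 *			.retry_count	     = 7,
 *			.rnr_retry_count     = 7,
 *		};
 *
 *		return rdma_connect_locked(id, &param);
 *	}
 */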
4495
4496/**
4497 * rdma_connect - Initiate an active connection request.
4498 * @id: Connection identifier to connect.
4499 * @conn_param: Connection information used for connected QPs.
4500 *
4501 * Users must have resolved a route to the destination by calling
4502 * rdma_resolve_route before calling this routine.
4503 *
4504 * This call will either connect to a remote QP or obtain remote QP information
4505 * for unconnected rdma_cm_ids.  The actual operation is based on the
4506 * rdma_cm_id's port space.
4507 */
4508int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4509{
4510	struct rdma_id_private *id_priv =
4511		container_of(id, struct rdma_id_private, id);
4512	int ret;
4513
4514	mutex_lock(&id_priv->handler_mutex);
4515	ret = rdma_connect_locked(id, conn_param);
4516	mutex_unlock(&id_priv->handler_mutex);
4517	return ret;
4518}
4519EXPORT_SYMBOL(rdma_connect);
4520
4521/**
4522 * rdma_connect_ece - Initiate an active connection request with ECE data.
4523 * @id: Connection identifier to connect.
4524 * @conn_param: Connection information used for connected QPs.
4525 * @ece: ECE parameters
4526 *
4527 * See rdma_connect() explanation.
4528 */
4529int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4530		     struct rdma_ucm_ece *ece)
4531{
4532	struct rdma_id_private *id_priv =
4533		container_of(id, struct rdma_id_private, id);
4534
4535	id_priv->ece.vendor_id = ece->vendor_id;
4536	id_priv->ece.attr_mod = ece->attr_mod;
4537
4538	return rdma_connect(id, conn_param);
4539}
4540EXPORT_SYMBOL(rdma_connect_ece);
4541
4542static int cma_accept_ib(struct rdma_id_private *id_priv,
4543			 struct rdma_conn_param *conn_param)
4544{
4545	struct ib_cm_rep_param rep;
4546	int ret;
4547
4548	ret = cma_modify_qp_rtr(id_priv, conn_param);
4549	if (ret)
4550		goto out;
4551
4552	ret = cma_modify_qp_rts(id_priv, conn_param);
4553	if (ret)
4554		goto out;
4555
4556	memset(&rep, 0, sizeof rep);
4557	rep.qp_num = id_priv->qp_num;
4558	rep.starting_psn = id_priv->seq_num;
4559	rep.private_data = conn_param->private_data;
4560	rep.private_data_len = conn_param->private_data_len;
4561	rep.responder_resources = conn_param->responder_resources;
4562	rep.initiator_depth = conn_param->initiator_depth;
4563	rep.failover_accepted = 0;
4564	rep.flow_control = conn_param->flow_control;
4565	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4566	rep.srq = id_priv->srq ? 1 : 0;
4567	rep.ece.vendor_id = id_priv->ece.vendor_id;
4568	rep.ece.attr_mod = id_priv->ece.attr_mod;
4569
4570	trace_cm_send_rep(id_priv);
4571	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
4572out:
4573	return ret;
4574}
4575
4576static int cma_accept_iw(struct rdma_id_private *id_priv,
4577		  struct rdma_conn_param *conn_param)
4578{
4579	struct iw_cm_conn_param iw_param;
4580	int ret;
4581
4582	if (!conn_param)
4583		return -EINVAL;
4584
4585	ret = cma_modify_qp_rtr(id_priv, conn_param);
4586	if (ret)
4587		return ret;
4588
4589	iw_param.ord = conn_param->initiator_depth;
4590	iw_param.ird = conn_param->responder_resources;
4591	iw_param.private_data = conn_param->private_data;
4592	iw_param.private_data_len = conn_param->private_data_len;
4593	if (id_priv->id.qp)
4594		iw_param.qpn = id_priv->qp_num;
4595	else
4596		iw_param.qpn = conn_param->qp_num;
4597
4598	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
4599}
4600
4601static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
4602			     enum ib_cm_sidr_status status, u32 qkey,
4603			     const void *private_data, int private_data_len)
4604{
4605	struct ib_cm_sidr_rep_param rep;
4606	int ret;
4607
4608	memset(&rep, 0, sizeof rep);
4609	rep.status = status;
4610	if (status == IB_SIDR_SUCCESS) {
4611		if (qkey)
4612			ret = cma_set_qkey(id_priv, qkey);
4613		else
4614			ret = cma_set_default_qkey(id_priv);
4615		if (ret)
4616			return ret;
4617		rep.qp_num = id_priv->qp_num;
4618		rep.qkey = id_priv->qkey;
4619
4620		rep.ece.vendor_id = id_priv->ece.vendor_id;
4621		rep.ece.attr_mod = id_priv->ece.attr_mod;
4622	}
4623
4624	rep.private_data = private_data;
4625	rep.private_data_len = private_data_len;
4626
4627	trace_cm_send_sidr_rep(id_priv);
4628	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
4629}
4630
4631/**
4632 * rdma_accept - Called to accept a connection request or response.
4633 * @id: Connection identifier associated with the request.
4634 * @conn_param: Information needed to establish the connection.  This must be
4635 *   provided if accepting a connection request.  If accepting a connection
4636 *   response, this parameter must be NULL.
4637 *
4638 * Typically, this routine is only called by the listener to accept a connection
4639 * request.  It must also be called on the active side of a connection if the
4640 * user is performing their own QP transitions.
4641 *
4642 * In the case of error, a reject message is sent to the remote side and the
4643 * state of the QP associated with the id is moved to the error state, so that
4644 * any previously posted receive buffers are flushed.
4645 *
4646 * This function is for use by kernel ULPs and must be called from under the
4647 * handler callback.
4648 */
4649int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4650{
4651	struct rdma_id_private *id_priv =
4652		container_of(id, struct rdma_id_private, id);
4653	int ret;
4654
4655	lockdep_assert_held(&id_priv->handler_mutex);
4656
4657	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4658		return -EINVAL;
4659
4660	if (!id->qp && conn_param) {
4661		id_priv->qp_num = conn_param->qp_num;
4662		id_priv->srq = conn_param->srq;
4663	}
4664
4665	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4666		if (id->qp_type == IB_QPT_UD) {
4667			if (conn_param)
4668				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4669							conn_param->qkey,
4670							conn_param->private_data,
4671							conn_param->private_data_len);
4672			else
4673				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4674							0, NULL, 0);
4675		} else {
4676			if (conn_param)
4677				ret = cma_accept_ib(id_priv, conn_param);
4678			else
4679				ret = cma_rep_recv(id_priv);
4680		}
4681	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4682		ret = cma_accept_iw(id_priv, conn_param);
4683	} else {
4684		ret = -ENOSYS;
4685	}
4686	if (ret)
4687		goto reject;
4688
4689	return 0;
4690reject:
4691	cma_modify_qp_err(id_priv);
4692	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
4693	return ret;
4694}
4695EXPORT_SYMBOL(rdma_accept);
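
/*
 * Editorial sketch, not part of this module: accepting from the
 * RDMA_CM_EVENT_CONNECT_REQUEST callback, where the handler_mutex of the new
 * child id is already held.  my_create_qp() is a hypothetical ULP helper that
 * creates a QP and associates it with the id; parameter values are examples.
 *
 *	case RDMA_CM_EVENT_CONNECT_REQUEST: {
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth     = 1,
 *		};
 *		int ret;
 *
 *		ret = my_create_qp(id);
 *		if (!ret)
 *			ret = rdma_accept(id, &param);
 *		return ret;
 *	}
 *
 * On failure rdma_accept() has already sent a reject and moved the QP to the
 * error state; returning non-zero from the handler then lets the rdma_cm
 * core destroy the child id.
 */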
4696
4697int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4698		    struct rdma_ucm_ece *ece)
4699{
4700	struct rdma_id_private *id_priv =
4701		container_of(id, struct rdma_id_private, id);
4702
4703	id_priv->ece.vendor_id = ece->vendor_id;
4704	id_priv->ece.attr_mod = ece->attr_mod;
4705
4706	return rdma_accept(id, conn_param);
4707}
4708EXPORT_SYMBOL(rdma_accept_ece);
4709
4710void rdma_lock_handler(struct rdma_cm_id *id)
4711{
4712	struct rdma_id_private *id_priv =
4713		container_of(id, struct rdma_id_private, id);
4714
4715	mutex_lock(&id_priv->handler_mutex);
4716}
4717EXPORT_SYMBOL(rdma_lock_handler);
4718
4719void rdma_unlock_handler(struct rdma_cm_id *id)
4720{
4721	struct rdma_id_private *id_priv =
4722		container_of(id, struct rdma_id_private, id);
4723
4724	mutex_unlock(&id_priv->handler_mutex);
4725}
4726EXPORT_SYMBOL(rdma_unlock_handler);
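
/*
 * Editorial sketch, not part of this module: a ULP that defers the accept or
 * reject decision (for example to a workqueue) must take the handler lock
 * itself before calling rdma_accept(), since rdma_accept() asserts that
 * handler_mutex is held:
 *
 *	rdma_lock_handler(id);
 *	ret = rdma_accept(id, &param);
 *	rdma_unlock_handler(id);
 */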
4727
4728int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
4729{
4730	struct rdma_id_private *id_priv;
4731	int ret;
4732
4733	id_priv = container_of(id, struct rdma_id_private, id);
4734	if (!id_priv->cm_id.ib)
4735		return -EINVAL;
4736
4737	switch (id->device->node_type) {
4738	case RDMA_NODE_IB_CA:
4739		ret = ib_cm_notify(id_priv->cm_id.ib, event);
4740		break;
4741	default:
4742		ret = 0;
4743		break;
4744	}
4745	return ret;
4746}
4747EXPORT_SYMBOL(rdma_notify);
4748
4749int rdma_reject(struct rdma_cm_id *id, const void *private_data,
4750		u8 private_data_len, u8 reason)
4751{
4752	struct rdma_id_private *id_priv;
4753	int ret;
4754
4755	id_priv = container_of(id, struct rdma_id_private, id);
4756	if (!id_priv->cm_id.ib)
4757		return -EINVAL;
4758
4759	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4760		if (id->qp_type == IB_QPT_UD) {
4761			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
4762						private_data, private_data_len);
4763		} else {
4764			trace_cm_send_rej(id_priv);
4765			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
4766					     private_data, private_data_len);
4767		}
4768	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4769		ret = iw_cm_reject(id_priv->cm_id.iw,
4770				   private_data, private_data_len);
4771	} else {
4772		ret = -ENOSYS;
4773	}
4774
4775	return ret;
4776}
4777EXPORT_SYMBOL(rdma_reject);
4778
4779int rdma_disconnect(struct rdma_cm_id *id)
4780{
4781	struct rdma_id_private *id_priv;
4782	int ret;
4783
4784	id_priv = container_of(id, struct rdma_id_private, id);
4785	if (!id_priv->cm_id.ib)
4786		return -EINVAL;
4787
4788	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4789		ret = cma_modify_qp_err(id_priv);
4790		if (ret)
4791			goto out;
4792		/* Initiate or respond to a disconnect. */
4793		trace_cm_disconnect(id_priv);
4794		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
4795			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
4796				trace_cm_sent_drep(id_priv);
4797		} else {
4798			trace_cm_sent_dreq(id_priv);
4799		}
4800	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4801		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
4802	} else
4803		ret = -EINVAL;
4804
4805out:
4806	return ret;
4807}
4808EXPORT_SYMBOL(rdma_disconnect);
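
/*
 * Editorial sketch, not part of this module: orderly teardown.  Either side
 * may call rdma_disconnect(); the peer then sees RDMA_CM_EVENT_DISCONNECTED
 * and typically mirrors the call before releasing its resources:
 *
 *	case RDMA_CM_EVENT_DISCONNECTED:
 *		rdma_disconnect(id);
 *		my_schedule_teardown(id);
 *		return 0;
 *
 * my_schedule_teardown() stands in for hypothetical ULP code that drains and
 * destroys the QP and then calls rdma_destroy_id() from a context outside
 * this handler.
 */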
4809
4810static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
4811			      struct ib_sa_multicast *multicast,
4812			      struct rdma_cm_event *event,
4813			      struct cma_multicast *mc)
4814{
4815	struct rdma_dev_addr *dev_addr;
4816	enum ib_gid_type gid_type;
4817	struct net_device *ndev;
4818
4819	if (status)
4820		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
4821				     status);
4822
4823	event->status = status;
4824	event->param.ud.private_data = mc->context;
4825	if (status) {
4826		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4827		return;
4828	}
4829
4830	dev_addr = &id_priv->id.route.addr.dev_addr;
4831	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4832	gid_type =
4833		id_priv->cma_dev
4834			->default_gid_type[id_priv->id.port_num -
4835					   rdma_start_port(
4836						   id_priv->cma_dev->device)];
4837
4838	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
4839	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
4840				     &multicast->rec, ndev, gid_type,
4841				     &event->param.ud.ah_attr)) {
4842		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4843		goto out;
4844	}
4845
4846	event->param.ud.qp_num = 0xFFFFFF;
4847	event->param.ud.qkey = id_priv->qkey;
4848
4849out:
4850	dev_put(ndev);
4851}
4852
4853static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
4854{
4855	struct cma_multicast *mc = multicast->context;
4856	struct rdma_id_private *id_priv = mc->id_priv;
4857	struct rdma_cm_event event = {};
4858	int ret = 0;
4859
4860	mutex_lock(&id_priv->handler_mutex);
4861	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
4862	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
4863		goto out;
4864
4865	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
4866	if (!ret) {
4867		cma_make_mc_event(status, id_priv, multicast, &event, mc);
4868		ret = cma_cm_event_handler(id_priv, &event);
4869	}
4870	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4871	WARN_ON(ret);
4872
4873out:
4874	mutex_unlock(&id_priv->handler_mutex);
4875	return 0;
4876}
4877
4878static void cma_set_mgid(struct rdma_id_private *id_priv,
4879			 struct sockaddr *addr, union ib_gid *mgid)
4880{
4881	unsigned char mc_map[MAX_ADDR_LEN];
4882	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4883	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
4884	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
4885
4886	if (cma_any_addr(addr)) {
4887		memset(mgid, 0, sizeof *mgid);
4888	} else if ((addr->sa_family == AF_INET6) &&
4889		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
4890								 0xFF10A01B)) {
4891		/* IPv6 address is an SA-assigned MGID. */
4892		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4893	} else if (addr->sa_family == AF_IB) {
4894		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
4895	} else if (addr->sa_family == AF_INET6) {
4896		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
4897		if (id_priv->id.ps == RDMA_PS_UDP)
4898			mc_map[7] = 0x01;	/* Use RDMA CM signature */
4899		*mgid = *(union ib_gid *) (mc_map + 4);
4900	} else {
4901		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
4902		if (id_priv->id.ps == RDMA_PS_UDP)
4903			mc_map[7] = 0x01;	/* Use RDMA CM signature */
4904		*mgid = *(union ib_gid *) (mc_map + 4);
4905	}
4906}
4907
4908static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
4909				 struct cma_multicast *mc)
4910{
4911	struct ib_sa_mcmember_rec rec;
4912	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4913	ib_sa_comp_mask comp_mask;
4914	int ret;
4915
4916	ib_addr_get_mgid(dev_addr, &rec.mgid);
4917	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4918				     &rec.mgid, &rec);
4919	if (ret)
4920		return ret;
4921
4922	if (!id_priv->qkey) {
4923		ret = cma_set_default_qkey(id_priv);
4924		if (ret)
4925			return ret;
4926	}
4927
4928	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
4929	rec.qkey = cpu_to_be32(id_priv->qkey);
4930	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
4931	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
4932	rec.join_state = mc->join_state;
4933
4934	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
4935		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
4936		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
4937		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
4938		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
4939
4940	if (id_priv->id.ps == RDMA_PS_IPOIB)
4941		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
4942			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
4943			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
4944			     IB_SA_MCMEMBER_REC_MTU |
4945			     IB_SA_MCMEMBER_REC_HOP_LIMIT;
4946
4947	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4948					 id_priv->id.port_num, &rec, comp_mask,
4949					 GFP_KERNEL, cma_ib_mc_handler, mc);
4950	return PTR_ERR_OR_ZERO(mc->sa_mc);
4951}
4952
4953static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
4954			      enum ib_gid_type gid_type)
4955{
4956	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
4957	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
4958
4959	if (cma_any_addr(addr)) {
4960		memset(mgid, 0, sizeof *mgid);
4961	} else if (addr->sa_family == AF_INET6) {
4962		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4963	} else {
4964		mgid->raw[0] =
4965			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
4966		mgid->raw[1] =
4967			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
4968		mgid->raw[2] = 0;
4969		mgid->raw[3] = 0;
4970		mgid->raw[4] = 0;
4971		mgid->raw[5] = 0;
4972		mgid->raw[6] = 0;
4973		mgid->raw[7] = 0;
4974		mgid->raw[8] = 0;
4975		mgid->raw[9] = 0;
4976		mgid->raw[10] = 0xff;
4977		mgid->raw[11] = 0xff;
4978		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
4979	}
4980}
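
/*
 * Editorial note, illustrative only: worked example of the IPv4 mapping
 * above.  For the group address 224.1.2.3 the resulting MGID is
 *
 *	RoCE v2 (IB_GID_TYPE_ROCE_UDP_ENCAP): ::ffff:224.1.2.3
 *	RoCE v1 (other gid_type):             ff0e:0000:0000:0000:0000:ffff:e001:0203
 *
 * i.e. only the first two raw bytes differ (00 00 vs. ff 0e), and the RoCE v2
 * form is simply the IPv4-mapped IPv6 representation of the group address.
 */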
4981
4982static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
4983				   struct cma_multicast *mc)
4984{
4985	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4986	int err = 0;
4987	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
4988	struct net_device *ndev = NULL;
4989	struct ib_sa_multicast ib = {};
4990	enum ib_gid_type gid_type;
4991	bool send_only;
4992
4993	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
4994
4995	if (cma_zero_addr(addr))
4996		return -EINVAL;
4997
4998	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4999		   rdma_start_port(id_priv->cma_dev->device)];
5000	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
5001
5002	ib.rec.pkey = cpu_to_be16(0xffff);
5003	if (dev_addr->bound_dev_if)
5004		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
5005	if (!ndev)
5006		return -ENODEV;
5007
5008	ib.rec.rate = IB_RATE_PORT_CURRENT;
5009	ib.rec.hop_limit = 1;
5010	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
5011
5012	if (addr->sa_family == AF_INET) {
5013		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
5014			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
5015			if (!send_only) {
5016				err = cma_igmp_send(ndev, &ib.rec.mgid,
5017						    true);
5018			}
5019		}
5020	} else {
5021		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
5022			err = -ENOTSUPP;
5023	}
5024	dev_put(ndev);
5025	if (err || !ib.rec.mtu)
5026		return err ?: -EINVAL;
5027
5028	if (!id_priv->qkey)
5029		cma_set_default_qkey(id_priv);
5030
5031	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
5032		    &ib.rec.port_gid);
5033	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
5034	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
5035	queue_work(cma_wq, &mc->iboe_join.work);
5036	return 0;
5037}
5038
5039int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
5040			u8 join_state, void *context)
5041{
5042	struct rdma_id_private *id_priv =
5043		container_of(id, struct rdma_id_private, id);
5044	struct cma_multicast *mc;
5045	int ret;
5046
5047	/* Not supported for kernel QPs */
5048	if (WARN_ON(id->qp))
5049		return -EINVAL;
5050
5051	/* ULP is calling this wrong. */
5052	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
5053			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
5054		return -EINVAL;
5055
5056	if (id_priv->id.qp_type != IB_QPT_UD)
5057		return -EINVAL;
5058
5059	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
5060	if (!mc)
5061		return -ENOMEM;
5062
5063	memcpy(&mc->addr, addr, rdma_addr_size(addr));
5064	mc->context = context;
5065	mc->id_priv = id_priv;
5066	mc->join_state = join_state;
5067
5068	if (rdma_protocol_roce(id->device, id->port_num)) {
5069		ret = cma_iboe_join_multicast(id_priv, mc);
5070		if (ret)
5071			goto out_err;
5072	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
5073		ret = cma_join_ib_multicast(id_priv, mc);
5074		if (ret)
5075			goto out_err;
5076	} else {
5077		ret = -ENOSYS;
5078		goto out_err;
5079	}
5080
5081	spin_lock(&id_priv->lock);
5082	list_add(&mc->list, &id_priv->mc_list);
5083	spin_unlock(&id_priv->lock);
5084
5085	return 0;
5086out_err:
5087	kfree(mc);
5088	return ret;
5089}
5090EXPORT_SYMBOL(rdma_join_multicast);
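
/*
 * Editorial sketch, not part of this module: joining a multicast group on a
 * UD rdma_cm_id once its address is bound or resolved.  The group address
 * (0xe0010203 is 224.1.2.3), join-state constant and context are
 * illustrative; BIT(SENDONLY_FULLMEMBER_JOIN), checked in
 * cma_iboe_join_multicast(), would request the send-only variant instead.
 *
 *	struct sockaddr_in grp = {
 *		.sin_family	 = AF_INET,
 *		.sin_addr.s_addr = htonl(0xe0010203),
 *	};
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&grp,
 *				  BIT(FULLMEMBER_JOIN), my_ctx);
 *
 * Completion is reported to the event handler as RDMA_CM_EVENT_MULTICAST_JOIN
 * (or _ERROR); event->param.ud then carries the ah_attr, qp_num and qkey
 * needed to address sends to the group, and rdma_leave_multicast() undoes the
 * join.
 */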
5091
5092void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
5093{
5094	struct rdma_id_private *id_priv;
5095	struct cma_multicast *mc;
5096
5097	id_priv = container_of(id, struct rdma_id_private, id);
5098	spin_lock_irq(&id_priv->lock);
5099	list_for_each_entry(mc, &id_priv->mc_list, list) {
5100		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
5101			continue;
5102		list_del(&mc->list);
5103		spin_unlock_irq(&id_priv->lock);
5104
5105		WARN_ON(id_priv->cma_dev->device != id->device);
5106		destroy_mc(id_priv, mc);
5107		return;
5108	}
5109	spin_unlock_irq(&id_priv->lock);
5110}
5111EXPORT_SYMBOL(rdma_leave_multicast);
5112
5113static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
5114{
5115	struct rdma_dev_addr *dev_addr;
5116	struct cma_work *work;
5117
5118	dev_addr = &id_priv->id.route.addr.dev_addr;
5119
5120	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
5121	    (net_eq(dev_net(ndev), dev_addr->net)) &&
5122	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
5123		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
5124			ndev->name, &id_priv->id);
5125		work = kzalloc(sizeof *work, GFP_KERNEL);
5126		if (!work)
5127			return -ENOMEM;
5128
5129		INIT_WORK(&work->work, cma_work_handler);
5130		work->id = id_priv;
5131		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
5132		cma_id_get(id_priv);
5133		queue_work(cma_wq, &work->work);
5134	}
5135
5136	return 0;
5137}
5138
5139static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
5140			       void *ptr)
5141{
5142	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5143	struct cma_device *cma_dev;
5144	struct rdma_id_private *id_priv;
5145	int ret = NOTIFY_DONE;
5146
5147	if (event != NETDEV_BONDING_FAILOVER)
5148		return NOTIFY_DONE;
5149
5150	if (!netif_is_bond_master(ndev))
5151		return NOTIFY_DONE;
5152
5153	mutex_lock(&lock);
5154	list_for_each_entry(cma_dev, &dev_list, list)
5155		list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
5156			ret = cma_netdev_change(ndev, id_priv);
5157			if (ret)
5158				goto out;
5159		}
5160
5161out:
5162	mutex_unlock(&lock);
5163	return ret;
5164}
5165
5166static void cma_netevent_work_handler(struct work_struct *_work)
5167{
5168	struct rdma_id_private *id_priv =
5169		container_of(_work, struct rdma_id_private, id.net_work);
5170	struct rdma_cm_event event = {};
5171
5172	mutex_lock(&id_priv->handler_mutex);
5173
5174	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
5175	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
5176		goto out_unlock;
5177
5178	event.event = RDMA_CM_EVENT_UNREACHABLE;
5179	event.status = -ETIMEDOUT;
5180
5181	if (cma_cm_event_handler(id_priv, &event)) {
5182		__acquire(&id_priv->handler_mutex);
5183		id_priv->cm_id.ib = NULL;
5184		cma_id_put(id_priv);
5185		destroy_id_handler_unlock(id_priv);
5186		return;
5187	}
5188
5189out_unlock:
5190	mutex_unlock(&id_priv->handler_mutex);
5191	cma_id_put(id_priv);
5192}
5193
5194static int cma_netevent_callback(struct notifier_block *self,
5195				 unsigned long event, void *ctx)
5196{
5197	struct id_table_entry *ips_node = NULL;
5198	struct rdma_id_private *current_id;
5199	struct neighbour *neigh = ctx;
5200	unsigned long flags;
5201
5202	if (event != NETEVENT_NEIGH_UPDATE)
5203		return NOTIFY_DONE;
5204
5205	spin_lock_irqsave(&id_table_lock, flags);
5206	if (neigh->tbl->family == AF_INET6) {
5207		struct sockaddr_in6 neigh_sock_6;
5208
5209		neigh_sock_6.sin6_family = AF_INET6;
5210		neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
5211		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5212					     (struct sockaddr *)&neigh_sock_6);
5213	} else if (neigh->tbl->family == AF_INET) {
5214		struct sockaddr_in neigh_sock_4;
5215
5216		neigh_sock_4.sin_family = AF_INET;
5217		neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
5218		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5219					     (struct sockaddr *)&neigh_sock_4);
5220	} else
5221		goto out;
5222
5223	if (!ips_node)
5224		goto out;
5225
5226	list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
5227		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
5228			   neigh->ha, ETH_ALEN))
5229			continue;
5230		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
5231		cma_id_get(current_id);
5232		queue_work(cma_wq, &current_id->id.net_work);
5233	}
5234out:
5235	spin_unlock_irqrestore(&id_table_lock, flags);
5236	return NOTIFY_DONE;
5237}
5238
5239static struct notifier_block cma_nb = {
5240	.notifier_call = cma_netdev_callback
5241};
5242
5243static struct notifier_block cma_netevent_cb = {
5244	.notifier_call = cma_netevent_callback
5245};
5246
5247static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
5248{
5249	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
5250	enum rdma_cm_state state;
5251	unsigned long flags;
5252
5253	mutex_lock(&id_priv->handler_mutex);
5254	/* Record that we want to remove the device */
5255	spin_lock_irqsave(&id_priv->lock, flags);
5256	state = id_priv->state;
5257	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
5258		spin_unlock_irqrestore(&id_priv->lock, flags);
5259		mutex_unlock(&id_priv->handler_mutex);
5260		cma_id_put(id_priv);
5261		return;
5262	}
5263	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
5264	spin_unlock_irqrestore(&id_priv->lock, flags);
5265
5266	if (cma_cm_event_handler(id_priv, &event)) {
5267		/*
5268		 * At this point the ULP promises it won't call
5269		 * rdma_destroy_id() concurrently
5270		 */
5271		cma_id_put(id_priv);
5272		mutex_unlock(&id_priv->handler_mutex);
5273		trace_cm_id_destroy(id_priv);
5274		_destroy_id(id_priv, state);
5275		return;
5276	}
5277	mutex_unlock(&id_priv->handler_mutex);
5278
5279	/*
5280	 * If this races with destroy, the thread that first moves the state
5281	 * to destroying performs the cancel.
5282	 */
5283	cma_cancel_operation(id_priv, state);
5284	cma_id_put(id_priv);
5285}
5286
5287static void cma_process_remove(struct cma_device *cma_dev)
5288{
5289	mutex_lock(&lock);
5290	while (!list_empty(&cma_dev->id_list)) {
5291		struct rdma_id_private *id_priv = list_first_entry(
5292			&cma_dev->id_list, struct rdma_id_private, device_item);
5293
5294		list_del_init(&id_priv->listen_item);
5295		list_del_init(&id_priv->device_item);
5296		cma_id_get(id_priv);
5297		mutex_unlock(&lock);
5298
5299		cma_send_device_removal_put(id_priv);
5300
5301		mutex_lock(&lock);
5302	}
5303	mutex_unlock(&lock);
5304
5305	cma_dev_put(cma_dev);
5306	wait_for_completion(&cma_dev->comp);
5307}
5308
5309static bool cma_supported(struct ib_device *device)
5310{
5311	u32 i;
5312
5313	rdma_for_each_port(device, i) {
5314		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
5315			return true;
5316	}
5317	return false;
5318}
5319
5320static int cma_add_one(struct ib_device *device)
5321{
5322	struct rdma_id_private *to_destroy;
5323	struct cma_device *cma_dev;
5324	struct rdma_id_private *id_priv;
5325	unsigned long supported_gids = 0;
5326	int ret;
5327	u32 i;
5328
5329	if (!cma_supported(device))
5330		return -EOPNOTSUPP;
5331
5332	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
5333	if (!cma_dev)
5334		return -ENOMEM;
5335
5336	cma_dev->device = device;
5337	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
5338					    sizeof(*cma_dev->default_gid_type),
5339					    GFP_KERNEL);
5340	if (!cma_dev->default_gid_type) {
5341		ret = -ENOMEM;
5342		goto free_cma_dev;
5343	}
5344
5345	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
5346					    sizeof(*cma_dev->default_roce_tos),
5347					    GFP_KERNEL);
5348	if (!cma_dev->default_roce_tos) {
5349		ret = -ENOMEM;
5350		goto free_gid_type;
5351	}
5352
5353	rdma_for_each_port (device, i) {
5354		supported_gids = roce_gid_type_mask_support(device, i);
5355		WARN_ON(!supported_gids);
5356		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
5357			cma_dev->default_gid_type[i - rdma_start_port(device)] =
5358				CMA_PREFERRED_ROCE_GID_TYPE;
5359		else
5360			cma_dev->default_gid_type[i - rdma_start_port(device)] =
5361				find_first_bit(&supported_gids, BITS_PER_LONG);
5362		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
5363	}
5364
5365	init_completion(&cma_dev->comp);
5366	refcount_set(&cma_dev->refcount, 1);
5367	INIT_LIST_HEAD(&cma_dev->id_list);
5368	ib_set_client_data(device, &cma_client, cma_dev);
5369
5370	mutex_lock(&lock);
5371	list_add_tail(&cma_dev->list, &dev_list);
5372	list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
5373		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
5374		if (ret)
5375			goto free_listen;
5376	}
5377	mutex_unlock(&lock);
5378
5379	trace_cm_add_one(device);
5380	return 0;
5381
5382free_listen:
5383	list_del(&cma_dev->list);
5384	mutex_unlock(&lock);
5385
5386	/* cma_process_remove() will delete to_destroy */
5387	cma_process_remove(cma_dev);
5388	kfree(cma_dev->default_roce_tos);
5389free_gid_type:
5390	kfree(cma_dev->default_gid_type);
5391
5392free_cma_dev:
5393	kfree(cma_dev);
5394	return ret;
5395}
5396
5397static void cma_remove_one(struct ib_device *device, void *client_data)
5398{
5399	struct cma_device *cma_dev = client_data;
5400
5401	trace_cm_remove_one(device);
5402
5403	mutex_lock(&lock);
5404	list_del(&cma_dev->list);
5405	mutex_unlock(&lock);
5406
5407	cma_process_remove(cma_dev);
5408	kfree(cma_dev->default_roce_tos);
5409	kfree(cma_dev->default_gid_type);
5410	kfree(cma_dev);
5411}
5412
5413static int cma_init_net(struct net *net)
5414{
5415	struct cma_pernet *pernet = cma_pernet(net);
5416
5417	xa_init(&pernet->tcp_ps);
5418	xa_init(&pernet->udp_ps);
5419	xa_init(&pernet->ipoib_ps);
5420	xa_init(&pernet->ib_ps);
5421
5422	return 0;
5423}
5424
5425static void cma_exit_net(struct net *net)
5426{
5427	struct cma_pernet *pernet = cma_pernet(net);
5428
5429	WARN_ON(!xa_empty(&pernet->tcp_ps));
5430	WARN_ON(!xa_empty(&pernet->udp_ps));
5431	WARN_ON(!xa_empty(&pernet->ipoib_ps));
5432	WARN_ON(!xa_empty(&pernet->ib_ps));
5433}
5434
5435static struct pernet_operations cma_pernet_operations = {
5436	.init = cma_init_net,
5437	.exit = cma_exit_net,
5438	.id = &cma_pernet_id,
5439	.size = sizeof(struct cma_pernet),
5440};
5441
5442static int __init cma_init(void)
5443{
5444	int ret;
5445
5446	/*
5447	 * There is a rare lock ordering dependency in cma_netdev_callback()
5448	 * that only happens when bonding is enabled. Teach lockdep that rtnl
5449	 * must never be nested under lock so it can detect the inversion
5450	 * without having to test with bonding enabled.
5451	 */
5452	if (IS_ENABLED(CONFIG_LOCKDEP)) {
5453		rtnl_lock();
5454		mutex_lock(&lock);
5455		mutex_unlock(&lock);
5456		rtnl_unlock();
5457	}
5458
5459	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
5460	if (!cma_wq)
5461		return -ENOMEM;
5462
5463	ret = register_pernet_subsys(&cma_pernet_operations);
5464	if (ret)
5465		goto err_wq;
5466
5467	ib_sa_register_client(&sa_client);
5468	register_netdevice_notifier(&cma_nb);
5469	register_netevent_notifier(&cma_netevent_cb);
5470
5471	ret = ib_register_client(&cma_client);
5472	if (ret)
5473		goto err;
5474
5475	ret = cma_configfs_init();
5476	if (ret)
5477		goto err_ib;
5478
5479	return 0;
5480
5481err_ib:
5482	ib_unregister_client(&cma_client);
5483err:
5484	unregister_netevent_notifier(&cma_netevent_cb);
5485	unregister_netdevice_notifier(&cma_nb);
5486	ib_sa_unregister_client(&sa_client);
5487	unregister_pernet_subsys(&cma_pernet_operations);
5488err_wq:
5489	destroy_workqueue(cma_wq);
5490	return ret;
5491}
5492
5493static void __exit cma_cleanup(void)
5494{
5495	cma_configfs_exit();
5496	ib_unregister_client(&cma_client);
5497	unregister_netevent_notifier(&cma_netevent_cb);
5498	unregister_netdevice_notifier(&cma_nb);
5499	ib_sa_unregister_client(&sa_client);
5500	unregister_pernet_subsys(&cma_pernet_operations);
5501	destroy_workqueue(cma_wq);
5502}
5503
5504module_init(cma_init);
5505module_exit(cma_cleanup);