   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
   4 *
   5 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
   6 *  applies to SOCK_STREAM sockets only
   7 *  offers an alternative communication option for TCP-protocol sockets
   8 *  applicable with RoCE-cards only
   9 *
  10 *  Initial restrictions:
  11 *    - support for alternate links postponed
  12 *
  13 *  Copyright IBM Corp. 2016, 2018
  14 *
  15 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
  16 *              based on prototype from Frank Blaschka
  17 */
  18
  19#define KMSG_COMPONENT "smc"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/module.h>
  23#include <linux/socket.h>
  24#include <linux/workqueue.h>
  25#include <linux/in.h>
  26#include <linux/sched/signal.h>
  27#include <linux/if_vlan.h>
  28#include <linux/rcupdate_wait.h>
  29#include <linux/ctype.h>
  30#include <linux/splice.h>
  31
  32#include <net/sock.h>
  33#include <net/tcp.h>
  34#include <net/smc.h>
  35#include <asm/ioctls.h>
  36
  37#include <net/net_namespace.h>
  38#include <net/netns/generic.h>
  39#include "smc_netns.h"
  40
  41#include "smc.h"
  42#include "smc_clc.h"
  43#include "smc_llc.h"
  44#include "smc_cdc.h"
  45#include "smc_core.h"
  46#include "smc_ib.h"
  47#include "smc_ism.h"
  48#include "smc_pnet.h"
  49#include "smc_netlink.h"
  50#include "smc_tx.h"
  51#include "smc_rx.h"
  52#include "smc_close.h"
  53#include "smc_stats.h"
  54#include "smc_tracepoint.h"
  55#include "smc_sysctl.h"
  56
  57static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
  58						 * creation on server
  59						 */
  60static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
  61						 * creation on client
  62						 */
  63
  64static struct workqueue_struct	*smc_tcp_ls_wq;	/* wq for tcp listen work */
  65struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
  66struct workqueue_struct	*smc_close_wq;	/* wq for close work */
  67
  68static void smc_tcp_listen_work(struct work_struct *);
  69static void smc_connect_work(struct work_struct *);
  70
  71int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
  72{
  73	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
  74	void *hdr;
  75
  76	if (cb_ctx->pos[0])
  77		goto out;
  78
  79	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  80			  &smc_gen_nl_family, NLM_F_MULTI,
  81			  SMC_NETLINK_DUMP_HS_LIMITATION);
  82	if (!hdr)
  83		return -ENOMEM;
  84
  85	if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED,
  86		       sock_net(skb->sk)->smc.limit_smc_hs))
  87		goto err;
  88
  89	genlmsg_end(skb, hdr);
  90	cb_ctx->pos[0] = 1;
  91out:
  92	return skb->len;
  93err:
  94	genlmsg_cancel(skb, hdr);
  95	return -EMSGSIZE;
  96}
  97
  98int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
  99{
 100	sock_net(skb->sk)->smc.limit_smc_hs = true;
 101	return 0;
 102}
 103
 104int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
 105{
 106	sock_net(skb->sk)->smc.limit_smc_hs = false;
 107	return 0;
 108}
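
     /* The enable/disable handlers above back the matching SMC_NETLINK_*
      * generic netlink ops (wired up in smc_netlink.c), so the handshake
      * limitation can be toggled from userspace at runtime through the
      * smc generic netlink family.
      */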
 109
 110static void smc_set_keepalive(struct sock *sk, int val)
 111{
 112	struct smc_sock *smc = smc_sk(sk);
 113
 114	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
 115}
 116
 117static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
 118					  struct sk_buff *skb,
 119					  struct request_sock *req,
 120					  struct dst_entry *dst,
 121					  struct request_sock *req_unhash,
 122					  bool *own_req)
 123{
 124	struct smc_sock *smc;
 125	struct sock *child;
 126
 127	smc = smc_clcsock_user_data(sk);
 128
 129	if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
 130				sk->sk_max_ack_backlog)
 131		goto drop;
 132
 133	if (sk_acceptq_is_full(&smc->sk)) {
 134		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 135		goto drop;
 136	}
 137
 138	/* passthrough to original syn recv sock fct */
 139	child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
 140					       own_req);
 141	/* child must not inherit smc or its ops */
 142	if (child) {
 143		rcu_assign_sk_user_data(child, NULL);
 144
 145		/* v4-mapped sockets don't inherit parent ops. Don't restore. */
 146		if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
 147			inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
 148	}
 149	return child;
 150
 151drop:
 152	dst_release(dst);
 153	tcp_listendrop(sk);
 154	return NULL;
 155}
 156
 157static bool smc_hs_congested(const struct sock *sk)
 158{
 159	const struct smc_sock *smc;
 160
 161	smc = smc_clcsock_user_data(sk);
 162
 163	if (!smc)
 164		return true;
 165
 166	if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
 167		return true;
 168
 169	return false;
 170}
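
     /* smc_hs_congested() above is installed as the clcsock's
      * tcp_sk(sk)->smc_hs_congested hook when the limitation is enabled
      * (see smc_listen() later in this file): TCP consults it for incoming
      * SYNs that carry the SMC option, so a congested handshake workqueue
      * makes new connections degrade to plain TCP instead of piling up
      * more handshake work.
      */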
 171
 172static struct smc_hashinfo smc_v4_hashinfo = {
 173	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
 174};
 175
 176static struct smc_hashinfo smc_v6_hashinfo = {
 177	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
 178};
 179
 180int smc_hash_sk(struct sock *sk)
 181{
 182	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
 183	struct hlist_head *head;
 184
 185	head = &h->ht;
 186
 187	write_lock_bh(&h->lock);
 188	sk_add_node(sk, head);
 189	write_unlock_bh(&h->lock);
 190	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 191
 192	return 0;
 193}
 194EXPORT_SYMBOL_GPL(smc_hash_sk);
 195
 196void smc_unhash_sk(struct sock *sk)
 197{
 198	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
 199
 200	write_lock_bh(&h->lock);
 201	if (sk_del_node_init(sk))
 202		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 203	write_unlock_bh(&h->lock);
 204}
 205EXPORT_SYMBOL_GPL(smc_unhash_sk);
 206
  207/* This will be called just before the user finally releases the sock lock,
  208 * so do the work here that was deferred in BH context while the user was
  209 * holding the sock lock.
  210 */
 211static void smc_release_cb(struct sock *sk)
 212{
 213	struct smc_sock *smc = smc_sk(sk);
 214
 215	if (smc->conn.tx_in_release_sock) {
 216		smc_tx_pending(&smc->conn);
 217		smc->conn.tx_in_release_sock = false;
 218	}
 219}
 220
 221struct proto smc_proto = {
 222	.name		= "SMC",
 223	.owner		= THIS_MODULE,
 224	.keepalive	= smc_set_keepalive,
 225	.hash		= smc_hash_sk,
 226	.unhash		= smc_unhash_sk,
 227	.release_cb	= smc_release_cb,
 228	.obj_size	= sizeof(struct smc_sock),
 229	.h.smc_hash	= &smc_v4_hashinfo,
 230	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
 231};
 232EXPORT_SYMBOL_GPL(smc_proto);
 233
 234struct proto smc_proto6 = {
 235	.name		= "SMC6",
 236	.owner		= THIS_MODULE,
 237	.keepalive	= smc_set_keepalive,
 238	.hash		= smc_hash_sk,
 239	.unhash		= smc_unhash_sk,
 240	.release_cb	= smc_release_cb,
 241	.obj_size	= sizeof(struct smc_sock),
 242	.h.smc_hash	= &smc_v6_hashinfo,
 243	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
 244};
 245EXPORT_SYMBOL_GPL(smc_proto6);
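
     /* Illustrative userspace sketch (not part of this file): an
      * application opts in to SMC by creating its stream socket in the
      * AF_SMC family, with SMCPROTO_SMC for an IPv4 clcsock or
      * SMCPROTO_SMC6 for an IPv6 one:
      *
      *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
      *
      * bind()/listen()/connect()/accept() then behave as usual, and the
      * socket transparently falls back to plain TCP when the peer or the
      * hardware cannot do SMC.
      */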
 246
 247static void smc_fback_restore_callbacks(struct smc_sock *smc)
 248{
 249	struct sock *clcsk = smc->clcsock->sk;
 250
 251	write_lock_bh(&clcsk->sk_callback_lock);
 252	clcsk->sk_user_data = NULL;
 253
 254	smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
 255	smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
 256	smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
 257	smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
 258
 259	write_unlock_bh(&clcsk->sk_callback_lock);
 260}
 261
 262static void smc_restore_fallback_changes(struct smc_sock *smc)
 263{
 264	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
 265		smc->clcsock->file->private_data = smc->sk.sk_socket;
 266		smc->clcsock->file = NULL;
 267		smc_fback_restore_callbacks(smc);
 268	}
 269}
 270
 271static int __smc_release(struct smc_sock *smc)
 272{
 273	struct sock *sk = &smc->sk;
 274	int rc = 0;
 275
 276	if (!smc->use_fallback) {
 277		rc = smc_close_active(smc);
 278		smc_sock_set_flag(sk, SOCK_DEAD);
 279		sk->sk_shutdown |= SHUTDOWN_MASK;
 280	} else {
 281		if (sk->sk_state != SMC_CLOSED) {
 282			if (sk->sk_state != SMC_LISTEN &&
 283			    sk->sk_state != SMC_INIT)
 284				sock_put(sk); /* passive closing */
 285			if (sk->sk_state == SMC_LISTEN) {
 286				/* wake up clcsock accept */
 287				rc = kernel_sock_shutdown(smc->clcsock,
 288							  SHUT_RDWR);
 289			}
 290			sk->sk_state = SMC_CLOSED;
 291			sk->sk_state_change(sk);
 292		}
 293		smc_restore_fallback_changes(smc);
 294	}
 295
 296	sk->sk_prot->unhash(sk);
 297
 298	if (sk->sk_state == SMC_CLOSED) {
 299		if (smc->clcsock) {
 300			release_sock(sk);
 301			smc_clcsock_release(smc);
 302			lock_sock(sk);
 303		}
 304		if (!smc->use_fallback)
 305			smc_conn_free(&smc->conn);
 306	}
 307
 308	return rc;
 309}
 310
 311static int smc_release(struct socket *sock)
 312{
 313	struct sock *sk = sock->sk;
 314	struct smc_sock *smc;
 315	int old_state, rc = 0;
 316
 317	if (!sk)
 318		goto out;
 319
 320	sock_hold(sk); /* sock_put below */
 321	smc = smc_sk(sk);
 322
 323	old_state = sk->sk_state;
 324
 325	/* cleanup for a dangling non-blocking connect */
 326	if (smc->connect_nonblock && old_state == SMC_INIT)
 327		tcp_abort(smc->clcsock->sk, ECONNABORTED);
 328
 329	if (cancel_work_sync(&smc->connect_work))
 330		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
 331
 332	if (sk->sk_state == SMC_LISTEN)
 333		/* smc_close_non_accepted() is called and acquires
 334		 * sock lock for child sockets again
 335		 */
 336		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 337	else
 338		lock_sock(sk);
 339
 340	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
 341	    !smc->use_fallback)
 342		smc_close_active_abort(smc);
 343
 344	rc = __smc_release(smc);
 345
 346	/* detach socket */
 347	sock_orphan(sk);
 348	sock->sk = NULL;
 349	release_sock(sk);
 350
 351	sock_put(sk); /* sock_hold above */
 352	sock_put(sk); /* final sock_put */
 353out:
 354	return rc;
 355}
 356
 357static void smc_destruct(struct sock *sk)
 358{
 359	if (sk->sk_state != SMC_CLOSED)
 360		return;
 361	if (!sock_flag(sk, SOCK_DEAD))
 362		return;
 363}
 364
 365static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
 366				   int protocol)
 367{
 368	struct smc_sock *smc;
 369	struct proto *prot;
 370	struct sock *sk;
 371
 372	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
 373	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
 374	if (!sk)
 375		return NULL;
 376
 377	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
 378	sk->sk_state = SMC_INIT;
 379	sk->sk_destruct = smc_destruct;
 380	sk->sk_protocol = protocol;
 381	WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
 382	WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
 383	smc = smc_sk(sk);
 384	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 385	INIT_WORK(&smc->connect_work, smc_connect_work);
 386	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
 387	INIT_LIST_HEAD(&smc->accept_q);
 388	spin_lock_init(&smc->accept_q_lock);
 389	spin_lock_init(&smc->conn.send_lock);
 390	sk->sk_prot->hash(sk);
 391	mutex_init(&smc->clcsock_release_lock);
 392	smc_init_saved_callbacks(smc);
 393
 394	return sk;
 395}
 396
 397static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
 398		    int addr_len)
 399{
 400	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 401	struct sock *sk = sock->sk;
 402	struct smc_sock *smc;
 403	int rc;
 404
 405	smc = smc_sk(sk);
 406
 407	/* replicate tests from inet_bind(), to be safe wrt. future changes */
 408	rc = -EINVAL;
 409	if (addr_len < sizeof(struct sockaddr_in))
 410		goto out;
 411
 412	rc = -EAFNOSUPPORT;
 413	if (addr->sin_family != AF_INET &&
 414	    addr->sin_family != AF_INET6 &&
 415	    addr->sin_family != AF_UNSPEC)
 416		goto out;
 417	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
 418	if (addr->sin_family == AF_UNSPEC &&
 419	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
 420		goto out;
 421
 422	lock_sock(sk);
 423
 424	/* Check if socket is already active */
 425	rc = -EINVAL;
 426	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
 427		goto out_rel;
 428
 429	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
 430	smc->clcsock->sk->sk_reuseport = sk->sk_reuseport;
 431	rc = kernel_bind(smc->clcsock, uaddr, addr_len);
 432
 433out_rel:
 434	release_sock(sk);
 435out:
 436	return rc;
 437}
 438
 439/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 440 * clc socket (since smc is not called for these options from net/core)
 448			     (1UL << SOCK_DBG) | \
 449			     (1UL << SOCK_RCVTSTAMP) | \
 450			     (1UL << SOCK_RCVTSTAMPNS) | \
 451			     (1UL << SOCK_LOCALROUTE) | \
 452			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
 453			     (1UL << SOCK_RXQ_OVFL) | \
 454			     (1UL << SOCK_WIFI_STATUS) | \
 455			     (1UL << SOCK_NOFCS) | \
 456			     (1UL << SOCK_FILTER_LOCKED) | \
 457			     (1UL << SOCK_TSTAMP_NEW))
 458
 459/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */
 460static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
 461				     unsigned long mask)
 462{
 463	struct net *nnet = sock_net(nsk);
 464
 465	nsk->sk_userlocks = osk->sk_userlocks;
 466	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
 467		nsk->sk_sndbuf = osk->sk_sndbuf;
 468	} else {
 469		if (mask == SK_FLAGS_SMC_TO_CLC)
 470			WRITE_ONCE(nsk->sk_sndbuf,
 471				   READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
 472		else
 473			WRITE_ONCE(nsk->sk_sndbuf,
 474				   2 * READ_ONCE(nnet->smc.sysctl_wmem));
 475	}
 476	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
 477		nsk->sk_rcvbuf = osk->sk_rcvbuf;
 478	} else {
 479		if (mask == SK_FLAGS_SMC_TO_CLC)
 480			WRITE_ONCE(nsk->sk_rcvbuf,
 481				   READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
 482		else
 483			WRITE_ONCE(nsk->sk_rcvbuf,
 484				   2 * READ_ONCE(nnet->smc.sysctl_rmem));
 485	}
 486}
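
     /* Note on smc_adjust_sock_bufsizes() above: the mask argument doubles
      * as a direction flag. SK_FLAGS_SMC_TO_CLC means copying smc -> clc
      * socket, so the clc buffers are seeded from the TCP sysctl defaults;
      * otherwise (clc -> smc) the doubled SMC sysctl values are used, as
      * in smc_sock_alloc().
      */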
 487
 488static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
 489				   unsigned long mask)
 490{
  491	/* options we do not get control over via setsockopt */
 492	nsk->sk_type = osk->sk_type;
 493	nsk->sk_sndtimeo = osk->sk_sndtimeo;
 494	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
 495	nsk->sk_mark = READ_ONCE(osk->sk_mark);
 496	nsk->sk_priority = READ_ONCE(osk->sk_priority);
 497	nsk->sk_rcvlowat = osk->sk_rcvlowat;
 498	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
 499	nsk->sk_err = osk->sk_err;
 500
 501	nsk->sk_flags &= ~mask;
 502	nsk->sk_flags |= osk->sk_flags & mask;
 503
 504	smc_adjust_sock_bufsizes(nsk, osk, mask);
 505}
 506
 507static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
 508{
 509	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
 510}
 511
 512#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
 513			     (1UL << SOCK_KEEPOPEN) | \
 514			     (1UL << SOCK_LINGER) | \
 515			     (1UL << SOCK_DBG))
 516/* copy only settings and flags relevant for smc from clc to smc socket */
 517static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
 518{
 519	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
 520}
 521
 522/* register the new vzalloced sndbuf on all links */
 523static int smcr_lgr_reg_sndbufs(struct smc_link *link,
 524				struct smc_buf_desc *snd_desc)
 525{
 526	struct smc_link_group *lgr = link->lgr;
 527	int i, rc = 0;
 528
 529	if (!snd_desc->is_vm)
 530		return -EINVAL;
 531
 532	/* protect against parallel smcr_link_reg_buf() */
 533	down_write(&lgr->llc_conf_mutex);
 534	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 535		if (!smc_link_active(&lgr->lnk[i]))
 536			continue;
 537		rc = smcr_link_reg_buf(&lgr->lnk[i], snd_desc);
 538		if (rc)
 539			break;
 540	}
 541	up_write(&lgr->llc_conf_mutex);
 542	return rc;
 543}
 544
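     /* A short map of smcr_lgr_reg_rmbs() below: if the RMB's memory
      * region is already registered on the links, only the CONFIRM_RKEY
      * LLC exchange with the peer is needed and the read lock suffices
      * (fast path); otherwise the buffer must first be registered on
      * every active link under the write lock (slow path).
      */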
 545/* register the new rmb on all links */
 546static int smcr_lgr_reg_rmbs(struct smc_link *link,
 547			     struct smc_buf_desc *rmb_desc)
 548{
 549	struct smc_link_group *lgr = link->lgr;
 550	bool do_slow = false;
 551	int i, rc = 0;
 552
 553	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
 554	if (rc)
 555		return rc;
 556
 557	down_read(&lgr->llc_conf_mutex);
 558	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 559		if (!smc_link_active(&lgr->lnk[i]))
 560			continue;
 561		if (!rmb_desc->is_reg_mr[link->link_idx]) {
 562			up_read(&lgr->llc_conf_mutex);
 563			goto slow_path;
 564		}
 565	}
  566	/* mr already registered */
 567	goto fast_path;
 568slow_path:
 569	do_slow = true;
 570	/* protect against parallel smc_llc_cli_rkey_exchange() and
 571	 * parallel smcr_link_reg_buf()
 572	 */
 573	down_write(&lgr->llc_conf_mutex);
 574	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 575		if (!smc_link_active(&lgr->lnk[i]))
 576			continue;
 577		rc = smcr_link_reg_buf(&lgr->lnk[i], rmb_desc);
 578		if (rc)
 579			goto out;
 580	}
 581fast_path:
 582	/* exchange confirm_rkey msg with peer */
 583	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
 584	if (rc) {
 585		rc = -EFAULT;
 586		goto out;
 587	}
 588	rmb_desc->is_conf_rkey = true;
 589out:
 590	do_slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex);
 591	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
 592	return rc;
 593}
 594
 595static int smcr_clnt_conf_first_link(struct smc_sock *smc)
 596{
 597	struct smc_link *link = smc->conn.lnk;
 598	struct smc_llc_qentry *qentry;
 599	int rc;
 600
 601	/* Receive CONFIRM LINK request from server over RoCE fabric.
  602	 * Using twice the server's default timeout on the client side
  603	 * reduces the chance that the decline messages of both sides
  604	 * cross or collide.
 605	 */
 606	qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
 607			      SMC_LLC_CONFIRM_LINK);
 608	if (!qentry) {
 609		struct smc_clc_msg_decline dclc;
 610
 611		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
 612				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
 613		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
 614	}
 615	smc_llc_save_peer_uid(qentry);
 616	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
 617	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
 618	if (rc)
 619		return SMC_CLC_DECL_RMBE_EC;
 620
 621	rc = smc_ib_modify_qp_rts(link);
 622	if (rc)
 623		return SMC_CLC_DECL_ERR_RDYLNK;
 624
 625	smc_wr_remember_qp_attr(link);
 626
 627	/* reg the sndbuf if it was vzalloced */
 628	if (smc->conn.sndbuf_desc->is_vm) {
 629		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
 630			return SMC_CLC_DECL_ERR_REGBUF;
 631	}
 632
 633	/* reg the rmb */
 634	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
 635		return SMC_CLC_DECL_ERR_REGBUF;
 636
 637	/* confirm_rkey is implicit on 1st contact */
 638	smc->conn.rmb_desc->is_conf_rkey = true;
 639
 640	/* send CONFIRM LINK response over RoCE fabric */
 641	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
 642	if (rc < 0)
 643		return SMC_CLC_DECL_TIMEOUT_CL;
 644
 645	smc_llc_link_active(link);
 646	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
 647
 648	if (link->lgr->max_links > 1) {
 649		/* optional 2nd link, receive ADD LINK request from server */
 650		qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
 651				      SMC_LLC_ADD_LINK);
 652		if (!qentry) {
 653			struct smc_clc_msg_decline dclc;
 654
 655			rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
 656					      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
 657			if (rc == -EAGAIN)
 658				rc = 0; /* no DECLINE received, go with one link */
 659			return rc;
 660		}
 661		smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
 662		smc_llc_cli_add_link(link, qentry);
 663	}
 664	return 0;
 665}
 666
 667static bool smc_isascii(char *hostname)
 668{
 669	int i;
 670
 671	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
 672		if (!isascii(hostname[i]))
 673			return false;
 674	return true;
 675}
 676
 677static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
 678					struct smc_clc_msg_accept_confirm *clc)
 679{
 680	struct smc_clc_first_contact_ext *fce;
 681	int clc_v2_len;
 682
 683	if (clc->hdr.version == SMC_V1 ||
 684	    !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
 685		return;
 686
 687	if (smc->conn.lgr->is_smcd) {
 688		memcpy(smc->conn.lgr->negotiated_eid, clc->d1.eid,
 689		       SMC_MAX_EID_LEN);
 690		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, d1);
 691	} else {
 692		memcpy(smc->conn.lgr->negotiated_eid, clc->r1.eid,
 693		       SMC_MAX_EID_LEN);
 694		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, r1);
 695	}
 696	fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc) + clc_v2_len);
 697	smc->conn.lgr->peer_os = fce->os_type;
 698	smc->conn.lgr->peer_smc_release = fce->release;
 699	if (smc_isascii(fce->hostname))
 700		memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
 701		       SMC_MAX_HOSTNAME_LEN);
 702}
 703
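     /* Buffer size note for the two save_peer_info() helpers below:
      * rmbe_size and dmbe_size carry a compressed exponent encoding that
      * smc_uncompress_bufsize() (see smc.h) expands as 1 << (value + 14),
      * i.e. value 0 means a 16 KiB peer buffer and each increment doubles
      * it.
      */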
 704static void smcr_conn_save_peer_info(struct smc_sock *smc,
 705				     struct smc_clc_msg_accept_confirm *clc)
 706{
 707	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);
 708
 709	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
 710	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
 711	smc->conn.peer_rmbe_size = bufsize;
 712	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
 713	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
 714}
 715
 716static void smcd_conn_save_peer_info(struct smc_sock *smc,
 717				     struct smc_clc_msg_accept_confirm *clc)
 718{
 719	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
 720
 721	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
 722	smc->conn.peer_token = ntohll(clc->d0.token);
 723	/* msg header takes up space in the buffer */
 724	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
 725	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
 726	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
 727}
 728
 729static void smc_conn_save_peer_info(struct smc_sock *smc,
 730				    struct smc_clc_msg_accept_confirm *clc)
 731{
 732	if (smc->conn.lgr->is_smcd)
 733		smcd_conn_save_peer_info(smc, clc);
 734	else
 735		smcr_conn_save_peer_info(smc, clc);
 736	smc_conn_save_peer_info_fce(smc, clc);
 737}
 738
 739static void smc_link_save_peer_info(struct smc_link *link,
 740				    struct smc_clc_msg_accept_confirm *clc,
 741				    struct smc_init_info *ini)
 742{
 743	link->peer_qpn = ntoh24(clc->r0.qpn);
 744	memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
 745	memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
 746	link->peer_psn = ntoh24(clc->r0.psn);
 747	link->peer_mtu = clc->r0.qp_mtu;
 748}
 749
 750static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
 751				       struct smc_stats_fback *fback_arr)
 752{
 753	int cnt;
 754
 755	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
 756		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
 757			fback_arr[cnt].count++;
 758			break;
 759		}
 760		if (!fback_arr[cnt].fback_code) {
 761			fback_arr[cnt].fback_code = smc->fallback_rsn;
 762			fback_arr[cnt].count++;
 763			break;
 764		}
 765	}
 766}
 767
 768static void smc_stat_fallback(struct smc_sock *smc)
 769{
 770	struct net *net = sock_net(&smc->sk);
 771
 772	mutex_lock(&net->smc.mutex_fback_rsn);
 773	if (smc->listen_smc) {
 774		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
 775		net->smc.fback_rsn->srv_fback_cnt++;
 776	} else {
 777		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
 778		net->smc.fback_rsn->clnt_fback_cnt++;
 779	}
 780	mutex_unlock(&net->smc.mutex_fback_rsn);
 781}
 782
 783/* must be called under rcu read lock */
 784static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
 785{
 786	struct socket_wq *wq;
 787	__poll_t flags;
 788
 789	wq = rcu_dereference(smc->sk.sk_wq);
 790	if (!skwq_has_sleeper(wq))
 791		return;
 792
 793	/* wake up smc sk->sk_wq */
 794	if (!key) {
 795		/* sk_state_change */
 796		wake_up_interruptible_all(&wq->wait);
 797	} else {
 798		flags = key_to_poll(key);
 799		if (flags & (EPOLLIN | EPOLLOUT))
 800			/* sk_data_ready or sk_write_space */
 801			wake_up_interruptible_sync_poll(&wq->wait, flags);
 802		else if (flags & EPOLLERR)
 803			/* sk_error_report */
 804			wake_up_interruptible_poll(&wq->wait, flags);
 805	}
 806}
 807
 808static int smc_fback_mark_woken(wait_queue_entry_t *wait,
 809				unsigned int mode, int sync, void *key)
 810{
 811	struct smc_mark_woken *mark =
 812		container_of(wait, struct smc_mark_woken, wait_entry);
 813
 814	mark->woken = true;
 815	mark->key = key;
 816	return 0;
 817}
 818
 819static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
 820				     void (*clcsock_callback)(struct sock *sk))
 821{
 822	struct smc_mark_woken mark = { .woken = false };
 823	struct socket_wq *wq;
 824
 825	init_waitqueue_func_entry(&mark.wait_entry,
 826				  smc_fback_mark_woken);
 827	rcu_read_lock();
 828	wq = rcu_dereference(clcsk->sk_wq);
 829	if (!wq)
 830		goto out;
 831	add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
 832	clcsock_callback(clcsk);
 833	remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
 834
 835	if (mark.woken)
 836		smc_fback_wakeup_waitqueue(smc, mark.key);
 837out:
 838	rcu_read_unlock();
 839}
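
     /* The trick above: a throwaway wait queue entry (smc_fback_mark_woken)
      * is hooked onto the clcsock's wait queue before the original TCP
      * callback runs, so any wakeup that callback performs is captured
      * together with its poll key and then replayed on the smc socket's
      * own wait queue, where the application is actually sleeping.
      */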
 840
 841static void smc_fback_state_change(struct sock *clcsk)
 842{
 843	struct smc_sock *smc;
 844
 845	read_lock_bh(&clcsk->sk_callback_lock);
 846	smc = smc_clcsock_user_data(clcsk);
 847	if (smc)
 848		smc_fback_forward_wakeup(smc, clcsk,
 849					 smc->clcsk_state_change);
 850	read_unlock_bh(&clcsk->sk_callback_lock);
 851}
 852
 853static void smc_fback_data_ready(struct sock *clcsk)
 854{
 855	struct smc_sock *smc;
 856
 857	read_lock_bh(&clcsk->sk_callback_lock);
 858	smc = smc_clcsock_user_data(clcsk);
 859	if (smc)
 860		smc_fback_forward_wakeup(smc, clcsk,
 861					 smc->clcsk_data_ready);
 862	read_unlock_bh(&clcsk->sk_callback_lock);
 863}
 864
 865static void smc_fback_write_space(struct sock *clcsk)
 866{
 867	struct smc_sock *smc;
 868
 869	read_lock_bh(&clcsk->sk_callback_lock);
 870	smc = smc_clcsock_user_data(clcsk);
 871	if (smc)
 872		smc_fback_forward_wakeup(smc, clcsk,
 873					 smc->clcsk_write_space);
 874	read_unlock_bh(&clcsk->sk_callback_lock);
 875}
 876
 877static void smc_fback_error_report(struct sock *clcsk)
 878{
 879	struct smc_sock *smc;
 880
 881	read_lock_bh(&clcsk->sk_callback_lock);
 882	smc = smc_clcsock_user_data(clcsk);
 883	if (smc)
 884		smc_fback_forward_wakeup(smc, clcsk,
 885					 smc->clcsk_error_report);
 886	read_unlock_bh(&clcsk->sk_callback_lock);
 887}
 888
 889static void smc_fback_replace_callbacks(struct smc_sock *smc)
 890{
 891	struct sock *clcsk = smc->clcsock->sk;
 892
 893	write_lock_bh(&clcsk->sk_callback_lock);
 894	clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
 895
 896	smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
 897			       &smc->clcsk_state_change);
 898	smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
 899			       &smc->clcsk_data_ready);
 900	smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
 901			       &smc->clcsk_write_space);
 902	smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
 903			       &smc->clcsk_error_report);
 904
 905	write_unlock_bh(&clcsk->sk_callback_lock);
 906}
 907
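     /* Fallback switch-over: once SMC is abandoned, the struct file's
      * private_data is repointed from the smc socket to the clcsock so
      * that subsequent syscalls on the fd are served by TCP directly; the
      * fasync list moves along, and the replaced clcsock callbacks keep
      * waking up waiters still sleeping on the smc socket's wait queue.
      */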
 908static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 909{
 910	int rc = 0;
 911
 912	mutex_lock(&smc->clcsock_release_lock);
 913	if (!smc->clcsock) {
 914		rc = -EBADF;
 915		goto out;
 916	}
 917
 918	smc->use_fallback = true;
 919	smc->fallback_rsn = reason_code;
 920	smc_stat_fallback(smc);
 921	trace_smc_switch_to_fallback(smc, reason_code);
 922	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
 923		smc->clcsock->file = smc->sk.sk_socket->file;
 924		smc->clcsock->file->private_data = smc->clcsock;
 925		smc->clcsock->wq.fasync_list =
 926			smc->sk.sk_socket->wq.fasync_list;
 927		smc->sk.sk_socket->wq.fasync_list = NULL;
 928
 929		/* There might be some wait entries remaining
 930		 * in smc sk->sk_wq and they should be woken up
 931		 * as clcsock's wait queue is woken up.
 932		 */
 933		smc_fback_replace_callbacks(smc);
 934	}
 935out:
 936	mutex_unlock(&smc->clcsock_release_lock);
 937	return rc;
 938}
 939
 940/* fall back during connect */
 941static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 942{
 943	struct net *net = sock_net(&smc->sk);
 944	int rc = 0;
 945
 946	rc = smc_switch_to_fallback(smc, reason_code);
 947	if (rc) { /* fallback fails */
 948		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 949		if (smc->sk.sk_state == SMC_INIT)
 950			sock_put(&smc->sk); /* passive closing */
 951		return rc;
 952	}
 953	smc_copy_sock_settings_to_clc(smc);
 954	smc->connect_nonblock = 0;
 955	if (smc->sk.sk_state == SMC_INIT)
 956		smc->sk.sk_state = SMC_ACTIVE;
 957	return 0;
 958}
 959
 960/* decline and fall back during connect */
 961static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
 962					u8 version)
 963{
 964	struct net *net = sock_net(&smc->sk);
 965	int rc;
 966
 967	if (reason_code < 0) { /* error, fallback is not possible */
 968		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 969		if (smc->sk.sk_state == SMC_INIT)
 970			sock_put(&smc->sk); /* passive closing */
 971		return reason_code;
 972	}
 973	if (reason_code != SMC_CLC_DECL_PEERDECL) {
 974		rc = smc_clc_send_decline(smc, reason_code, version);
 975		if (rc < 0) {
 976			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 977			if (smc->sk.sk_state == SMC_INIT)
 978				sock_put(&smc->sk); /* passive closing */
 979			return rc;
 980		}
 981	}
 982	return smc_connect_fallback(smc, reason_code);
 983}
 984
 985static void smc_conn_abort(struct smc_sock *smc, int local_first)
 986{
 987	struct smc_connection *conn = &smc->conn;
 988	struct smc_link_group *lgr = conn->lgr;
 989	bool lgr_valid = false;
 990
 991	if (smc_conn_lgr_valid(conn))
 992		lgr_valid = true;
 993
 994	smc_conn_free(conn);
 995	if (local_first && lgr_valid)
 996		smc_lgr_cleanup_early(lgr);
 997}
 998
  999/* check if there is an rdma device available for this connection. */
1000/* called for connect and listen */
1001static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
1002{
1003	/* PNET table look up: search active ib_device and port
1004	 * within same PNETID that also contains the ethernet device
1005	 * used for the internal TCP socket
1006	 */
1007	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
1008	if (!ini->check_smcrv2 && !ini->ib_dev)
1009		return SMC_CLC_DECL_NOSMCRDEV;
1010	if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
1011		return SMC_CLC_DECL_NOSMCRDEV;
1012	return 0;
1013}
1014
1015/* check if there is an ISM device available for this connection. */
1016/* called for connect and listen */
1017static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
1018{
1019	/* Find ISM device with same PNETID as connecting interface  */
1020	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
1021	if (!ini->ism_dev[0])
1022		return SMC_CLC_DECL_NOSMCDDEV;
1023	else
1024		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
1025	return 0;
1026}
1027
1028/* is chid unique for the ism devices that are already determined? */
1029static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
1030					   int cnt)
1031{
1032	int i = (!ini->ism_dev[0]) ? 1 : 0;
1033
1034	for (; i < cnt; i++)
1035		if (ini->ism_chid[i] == chid)
1036			return false;
1037	return true;
1038}
1039
 1040/* determine possible V2 ISM devices (either without a PNETID or with a
 1041 * PNETID matching the net_device)
1042 */
1043static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
1044				       struct smc_init_info *ini)
1045{
1046	int rc = SMC_CLC_DECL_NOSMCDDEV;
1047	struct smcd_dev *smcd;
1048	int i = 1, entry = 1;
1049	bool is_virtual;
1050	u16 chid;
1051
1052	if (smcd_indicated(ini->smc_type_v1))
1053		rc = 0;		/* already initialized for V1 */
1054	mutex_lock(&smcd_dev_list.mutex);
1055	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1056		if (smcd->going_away || smcd == ini->ism_dev[0])
1057			continue;
1058		chid = smc_ism_get_chid(smcd);
1059		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
1060			continue;
1061		is_virtual = __smc_ism_is_virtual(chid);
1062		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
1063		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
1064			if (is_virtual && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
1065				/* It's the last GID-CHID entry left in CLC
1066				 * Proposal SMC-Dv2 extension, but a virtual
 1067				 * ISM device will take two entries. So skip
 1068				 * it and try the next potential ISM device.
1069				 */
1070				continue;
1071			ini->ism_dev[i] = smcd;
1072			ini->ism_chid[i] = chid;
1073			ini->is_smcd = true;
1074			rc = 0;
1075			i++;
1076			entry = is_virtual ? entry + 2 : entry + 1;
1077			if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
1078				break;
1079		}
1080	}
1081	mutex_unlock(&smcd_dev_list.mutex);
1082	ini->ism_offered_cnt = i - 1;
1083	if (!ini->ism_dev[0] && !ini->ism_dev[1])
1084		ini->smcd_version = 0;
1085
1086	return rc;
1087}
1088
1089/* Check for VLAN ID and register it on ISM device just for CLC handshake */
1090static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
1091				      struct smc_init_info *ini)
1092{
1093	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
1094		return SMC_CLC_DECL_ISMVLANERR;
1095	return 0;
1096}
1097
1098static int smc_find_proposal_devices(struct smc_sock *smc,
1099				     struct smc_init_info *ini)
1100{
1101	int rc = 0;
1102
1103	/* check if there is an ism device available */
1104	if (!(ini->smcd_version & SMC_V1) ||
1105	    smc_find_ism_device(smc, ini) ||
1106	    smc_connect_ism_vlan_setup(smc, ini))
1107		ini->smcd_version &= ~SMC_V1;
1108	/* else ISM V1 is supported for this connection */
1109
1110	/* check if there is an rdma device available */
1111	if (!(ini->smcr_version & SMC_V1) ||
1112	    smc_find_rdma_device(smc, ini))
1113		ini->smcr_version &= ~SMC_V1;
1114	/* else RDMA is supported for this connection */
1115
1116	ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
1117					      ini->smcr_version & SMC_V1);
1118
1119	/* check if there is an ism v2 device available */
1120	if (!(ini->smcd_version & SMC_V2) ||
1121	    !smc_ism_is_v2_capable() ||
1122	    smc_find_ism_v2_device_clnt(smc, ini))
1123		ini->smcd_version &= ~SMC_V2;
1124
1125	/* check if there is an rdma v2 device available */
1126	ini->check_smcrv2 = true;
1127	ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
1128	if (!(ini->smcr_version & SMC_V2) ||
1129	    smc->clcsock->sk->sk_family != AF_INET ||
1130	    !smc_clc_ueid_count() ||
1131	    smc_find_rdma_device(smc, ini))
1132		ini->smcr_version &= ~SMC_V2;
1133	ini->check_smcrv2 = false;
1134
1135	ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
1136					      ini->smcr_version & SMC_V2);
1137
1138	/* if neither ISM nor RDMA are supported, fallback */
1139	if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
1140		rc = SMC_CLC_DECL_NOSMCDEV;
1141
1142	return rc;
1143}
1144
1145/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
1146 * used, the VLAN ID will be registered again during the connection setup.
1147 */
1148static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
1149					struct smc_init_info *ini)
1150{
1151	if (!smcd_indicated(ini->smc_type_v1))
1152		return 0;
1153	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
1154		return SMC_CLC_DECL_CNFERR;
1155	return 0;
1156}
1157
1158#define SMC_CLC_MAX_ACCEPT_LEN \
1159	(sizeof(struct smc_clc_msg_accept_confirm) + \
1160	 sizeof(struct smc_clc_first_contact_ext_v2x) + \
1161	 sizeof(struct smc_clc_msg_trail))
1162
1163/* CLC handshake during connect */
1164static int smc_connect_clc(struct smc_sock *smc,
1165			   struct smc_clc_msg_accept_confirm *aclc,
1166			   struct smc_init_info *ini)
1167{
1168	int rc = 0;
1169
1170	/* do inband token exchange */
1171	rc = smc_clc_send_proposal(smc, ini);
1172	if (rc)
1173		return rc;
1174	/* receive SMC Accept CLC message */
1175	return smc_clc_wait_msg(smc, aclc, SMC_CLC_MAX_ACCEPT_LEN,
1176				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
1177}
1178
1179void smc_fill_gid_list(struct smc_link_group *lgr,
1180		       struct smc_gidlist *gidlist,
1181		       struct smc_ib_device *known_dev, u8 *known_gid)
1182{
1183	struct smc_init_info *alt_ini = NULL;
1184
1185	memset(gidlist, 0, sizeof(*gidlist));
1186	memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);
1187
1188	alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
1189	if (!alt_ini)
1190		goto out;
1191
1192	alt_ini->vlan_id = lgr->vlan_id;
1193	alt_ini->check_smcrv2 = true;
1194	alt_ini->smcrv2.saddr = lgr->saddr;
1195	smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);
1196
1197	if (!alt_ini->smcrv2.ib_dev_v2)
1198		goto out;
1199
1200	memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
1201	       SMC_GID_SIZE);
1202
1203out:
1204	kfree(alt_ini);
1205}
1206
1207static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
1208				       struct smc_clc_msg_accept_confirm *aclc,
1209				       struct smc_init_info *ini)
1210{
1211	struct smc_clc_first_contact_ext *fce =
1212		smc_get_clc_first_contact_ext(aclc, false);
1213	struct net *net = sock_net(&smc->sk);
1214	int rc;
1215
1216	if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
1217		return 0;
1218
1219	if (fce->v2_direct) {
1220		memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
1221		ini->smcrv2.uses_gateway = false;
1222	} else {
1223		if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
1224				      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
1225				      ini->smcrv2.nexthop_mac,
1226				      &ini->smcrv2.uses_gateway))
1227			return SMC_CLC_DECL_NOROUTE;
1228		if (!ini->smcrv2.uses_gateway) {
 1229			/* mismatch: peer claims indirect, but it's direct */
1230			return SMC_CLC_DECL_NOINDIRECT;
1231		}
1232	}
1233
1234	ini->release_nr = fce->release;
1235	rc = smc_clc_clnt_v2x_features_validate(fce, ini);
1236	if (rc)
1237		return rc;
1238
1239	return 0;
1240}
1241
1242/* setup for RDMA connection of client */
1243static int smc_connect_rdma(struct smc_sock *smc,
1244			    struct smc_clc_msg_accept_confirm *aclc,
1245			    struct smc_init_info *ini)
1246{
1247	int i, reason_code = 0;
1248	struct smc_link *link;
1249	u8 *eid = NULL;
1250
1251	ini->is_smcd = false;
1252	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
1253	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
1254	memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
1255	memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
1256	memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
1257	ini->max_conns = SMC_CONN_PER_LGR_MAX;
1258	ini->max_links = SMC_LINKS_ADD_LNK_MAX;
1259
1260	reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
1261	if (reason_code)
1262		return reason_code;
1263
1264	mutex_lock(&smc_client_lgr_pending);
1265	reason_code = smc_conn_create(smc, ini);
1266	if (reason_code) {
1267		mutex_unlock(&smc_client_lgr_pending);
1268		return reason_code;
1269	}
1270
1271	smc_conn_save_peer_info(smc, aclc);
1272
1273	if (ini->first_contact_local) {
1274		link = smc->conn.lnk;
1275	} else {
1276		/* set link that was assigned by server */
1277		link = NULL;
1278		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1279			struct smc_link *l = &smc->conn.lgr->lnk[i];
1280
1281			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
1282			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
1283				    SMC_GID_SIZE) &&
1284			    (aclc->hdr.version > SMC_V1 ||
1285			     !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
1286				     sizeof(l->peer_mac)))) {
1287				link = l;
1288				break;
1289			}
1290		}
1291		if (!link) {
1292			reason_code = SMC_CLC_DECL_NOSRVLINK;
1293			goto connect_abort;
1294		}
1295		smc_switch_link_and_count(&smc->conn, link);
1296	}
1297
1298	/* create send buffer and rmb */
1299	if (smc_buf_create(smc, false)) {
1300		reason_code = SMC_CLC_DECL_MEM;
1301		goto connect_abort;
1302	}
1303
1304	if (ini->first_contact_local)
1305		smc_link_save_peer_info(link, aclc, ini);
1306
1307	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
1308		reason_code = SMC_CLC_DECL_ERR_RTOK;
1309		goto connect_abort;
1310	}
1311
1312	smc_close_init(smc);
1313	smc_rx_init(smc);
1314
1315	if (ini->first_contact_local) {
1316		if (smc_ib_ready_link(link)) {
1317			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
1318			goto connect_abort;
1319		}
1320	} else {
1321		/* reg sendbufs if they were vzalloced */
1322		if (smc->conn.sndbuf_desc->is_vm) {
1323			if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) {
1324				reason_code = SMC_CLC_DECL_ERR_REGBUF;
1325				goto connect_abort;
1326			}
1327		}
1328		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
1329			reason_code = SMC_CLC_DECL_ERR_REGBUF;
1330			goto connect_abort;
1331		}
1332	}
1333
1334	if (aclc->hdr.version > SMC_V1) {
1335		eid = aclc->r1.eid;
1336		if (ini->first_contact_local)
1337			smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
1338					  link->smcibdev, link->gid);
1339	}
1340
1341	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
1342					   aclc->hdr.version, eid, ini);
1343	if (reason_code)
1344		goto connect_abort;
1345
1346	smc_tx_init(smc);
1347
1348	if (ini->first_contact_local) {
1349		/* QP confirmation over RoCE fabric */
1350		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
1351		reason_code = smcr_clnt_conf_first_link(smc);
1352		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
1353		if (reason_code)
1354			goto connect_abort;
1355	}
1356	mutex_unlock(&smc_client_lgr_pending);
1357
1358	smc_copy_sock_settings_to_clc(smc);
1359	smc->connect_nonblock = 0;
1360	if (smc->sk.sk_state == SMC_INIT)
1361		smc->sk.sk_state = SMC_ACTIVE;
1362
1363	return 0;
1364connect_abort:
1365	smc_conn_abort(smc, ini->first_contact_local);
1366	mutex_unlock(&smc_client_lgr_pending);
1367	smc->connect_nonblock = 0;
1368
1369	return reason_code;
1370}
1371
1372/* The server has chosen one of the proposed ISM devices for the communication.
1373 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
1374 */
1375static int
1376smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm *aclc,
1377			       struct smc_init_info *ini)
1378{
1379	int i;
1380
1381	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
1382		if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
1383			ini->ism_selected = i;
1384			return 0;
1385		}
1386	}
1387
1388	return -EPROTO;
1389}
1390
1391/* setup for ISM connection of client */
1392static int smc_connect_ism(struct smc_sock *smc,
1393			   struct smc_clc_msg_accept_confirm *aclc,
1394			   struct smc_init_info *ini)
1395{
1396	u8 *eid = NULL;
1397	int rc = 0;
1398
1399	ini->is_smcd = true;
1400	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
1401
1402	if (aclc->hdr.version == SMC_V2) {
1403		if (ini->first_contact_peer) {
1404			struct smc_clc_first_contact_ext *fce =
1405				smc_get_clc_first_contact_ext(aclc, true);
1406
1407			ini->release_nr = fce->release;
1408			rc = smc_clc_clnt_v2x_features_validate(fce, ini);
1409			if (rc)
1410				return rc;
1411		}
1412
1413		rc = smc_v2_determine_accepted_chid(aclc, ini);
1414		if (rc)
1415			return rc;
1416
1417		if (__smc_ism_is_virtual(ini->ism_chid[ini->ism_selected]))
1418			ini->ism_peer_gid[ini->ism_selected].gid_ext =
1419						ntohll(aclc->d1.gid_ext);
1420		/* for non-virtual ISM devices, peer gid_ext remains 0. */
1421	}
1422	ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
1423
1424	/* there is only one lgr role for SMC-D; use server lock */
1425	mutex_lock(&smc_server_lgr_pending);
1426	rc = smc_conn_create(smc, ini);
1427	if (rc) {
1428		mutex_unlock(&smc_server_lgr_pending);
1429		return rc;
1430	}
1431
1432	/* Create send and receive buffers */
1433	rc = smc_buf_create(smc, true);
1434	if (rc) {
1435		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
1436		goto connect_abort;
1437	}
1438
1439	smc_conn_save_peer_info(smc, aclc);
1440	smc_close_init(smc);
 
 
 
 
 
 
 
1441	smc_rx_init(smc);
1442	smc_tx_init(smc);
1443
1444	if (aclc->hdr.version > SMC_V1)
1445		eid = aclc->d1.eid;
1446
1447	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
1448				  aclc->hdr.version, eid, ini);
1449	if (rc)
1450		goto connect_abort;
1451	mutex_unlock(&smc_server_lgr_pending);
1452
1453	smc_copy_sock_settings_to_clc(smc);
1454	smc->connect_nonblock = 0;
1455	if (smc->sk.sk_state == SMC_INIT)
1456		smc->sk.sk_state = SMC_ACTIVE;
1457
1458	return 0;
1459connect_abort:
1460	smc_conn_abort(smc, ini->first_contact_local);
1461	mutex_unlock(&smc_server_lgr_pending);
1462	smc->connect_nonblock = 0;
1463
1464	return rc;
1465}
1466
1467/* check if received accept type and version matches a proposed one */
1468static int smc_connect_check_aclc(struct smc_init_info *ini,
1469				  struct smc_clc_msg_accept_confirm *aclc)
1470{
1471	if (aclc->hdr.typev1 != SMC_TYPE_R &&
1472	    aclc->hdr.typev1 != SMC_TYPE_D)
1473		return SMC_CLC_DECL_MODEUNSUPP;
1474
1475	if (aclc->hdr.version >= SMC_V2) {
1476		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1477		     !smcr_indicated(ini->smc_type_v2)) ||
1478		    (aclc->hdr.typev1 == SMC_TYPE_D &&
1479		     !smcd_indicated(ini->smc_type_v2)))
1480			return SMC_CLC_DECL_MODEUNSUPP;
1481	} else {
1482		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1483		     !smcr_indicated(ini->smc_type_v1)) ||
1484		    (aclc->hdr.typev1 == SMC_TYPE_D &&
1485		     !smcd_indicated(ini->smc_type_v1)))
1486			return SMC_CLC_DECL_MODEUNSUPP;
1487	}
1488
1489	return 0;
1490}
1491
1492/* perform steps before actually connecting */
1493static int __smc_connect(struct smc_sock *smc)
1494{
1495	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
1496	struct smc_clc_msg_accept_confirm *aclc;
1497	struct smc_init_info *ini = NULL;
1498	u8 *buf = NULL;
1499	int rc = 0;
1500
1501	if (smc->use_fallback)
1502		return smc_connect_fallback(smc, smc->fallback_rsn);
1503
1504	/* if peer has not signalled SMC-capability, fall back */
1505	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
1506		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
1507
1508	/* IPSec connections opt out of SMC optimizations */
1509	if (using_ipsec(smc))
1510		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
1511						    version);
1512
1513	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1514	if (!ini)
1515		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
1516						    version);
1517
1518	ini->smcd_version = SMC_V1 | SMC_V2;
1519	ini->smcr_version = SMC_V1 | SMC_V2;
1520	ini->smc_type_v1 = SMC_TYPE_B;
1521	ini->smc_type_v2 = SMC_TYPE_B;
1522
1523	/* get vlan id from IP device */
1524	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
1525		ini->smcd_version &= ~SMC_V1;
1526		ini->smcr_version = 0;
1527		ini->smc_type_v1 = SMC_TYPE_N;
1528		if (!ini->smcd_version) {
1529			rc = SMC_CLC_DECL_GETVLANERR;
1530			goto fallback;
1531		}
1532	}
1533
1534	rc = smc_find_proposal_devices(smc, ini);
1535	if (rc)
1536		goto fallback;
1537
1538	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
1539	if (!buf) {
1540		rc = SMC_CLC_DECL_MEM;
1541		goto fallback;
1542	}
1543	aclc = (struct smc_clc_msg_accept_confirm *)buf;
1544
1545	/* perform CLC handshake */
1546	rc = smc_connect_clc(smc, aclc, ini);
1547	if (rc) {
1548		/* -EAGAIN on timeout, see tcp_recvmsg() */
1549		if (rc == -EAGAIN) {
1550			rc = -ETIMEDOUT;
1551			smc->sk.sk_err = ETIMEDOUT;
1552		}
1553		goto vlan_cleanup;
1554	}
1555
1556	/* check if smc modes and versions of CLC proposal and accept match */
1557	rc = smc_connect_check_aclc(ini, aclc);
1558	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
1559	if (rc)
1560		goto vlan_cleanup;
1561
1562	/* depending on previous steps, connect using rdma or ism */
1563	if (aclc->hdr.typev1 == SMC_TYPE_R) {
1564		ini->smcr_version = version;
1565		rc = smc_connect_rdma(smc, aclc, ini);
1566	} else if (aclc->hdr.typev1 == SMC_TYPE_D) {
1567		ini->smcd_version = version;
1568		rc = smc_connect_ism(smc, aclc, ini);
1569	}
1570	if (rc)
1571		goto vlan_cleanup;
1572
1573	SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
1574	smc_connect_ism_vlan_cleanup(smc, ini);
1575	kfree(buf);
1576	kfree(ini);
1577	return 0;
1578
1579vlan_cleanup:
1580	smc_connect_ism_vlan_cleanup(smc, ini);
1581	kfree(buf);
1582fallback:
1583	kfree(ini);
1584	return smc_connect_decline_fallback(smc, rc, version);
1585}
1586
1587static void smc_connect_work(struct work_struct *work)
1588{
1589	struct smc_sock *smc = container_of(work, struct smc_sock,
1590					    connect_work);
1591	long timeo = smc->sk.sk_sndtimeo;
1592	int rc = 0;
1593
1594	if (!timeo)
1595		timeo = MAX_SCHEDULE_TIMEOUT;
1596	lock_sock(smc->clcsock->sk);
1597	if (smc->clcsock->sk->sk_err) {
1598		smc->sk.sk_err = smc->clcsock->sk->sk_err;
1599	} else if ((1 << smc->clcsock->sk->sk_state) &
1600					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1601		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1602		if ((rc == -EPIPE) &&
1603		    ((1 << smc->clcsock->sk->sk_state) &
1604					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
1605			rc = 0;
1606	}
1607	release_sock(smc->clcsock->sk);
1608	lock_sock(&smc->sk);
1609	if (rc != 0 || smc->sk.sk_err) {
1610		smc->sk.sk_state = SMC_CLOSED;
1611		if (rc == -EPIPE || rc == -EAGAIN)
1612			smc->sk.sk_err = EPIPE;
1613		else if (rc == -ECONNREFUSED)
1614			smc->sk.sk_err = ECONNREFUSED;
1615		else if (signal_pending(current))
1616			smc->sk.sk_err = -sock_intr_errno(timeo);
1617		sock_put(&smc->sk); /* passive closing */
1618		goto out;
1619	}
1620
1621	rc = __smc_connect(smc);
1622	if (rc < 0)
1623		smc->sk.sk_err = -rc;
1624
1625out:
1626	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1627		if (smc->sk.sk_err) {
1628			smc->sk.sk_state_change(&smc->sk);
1629		} else { /* allow polling before and after fallback decision */
1630			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1631			smc->sk.sk_write_space(&smc->sk);
1632		}
1633	}
1634	release_sock(&smc->sk);
1635}
1636
1637static int smc_connect(struct socket *sock, struct sockaddr *addr,
1638		       int alen, int flags)
1639{
1640	struct sock *sk = sock->sk;
1641	struct smc_sock *smc;
1642	int rc = -EINVAL;
1643
1644	smc = smc_sk(sk);
1645
1646	/* separate smc parameter checking to be safe */
1647	if (alen < sizeof(addr->sa_family))
1648		goto out_err;
1649	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
1650		goto out_err;
1651
1652	lock_sock(sk);
1653	switch (sock->state) {
1654	default:
1655		rc = -EINVAL;
1656		goto out;
1657	case SS_CONNECTED:
1658		rc = sk->sk_state == SMC_ACTIVE ? -EISCONN : -EINVAL;
1659		goto out;
1660	case SS_CONNECTING:
1661		if (sk->sk_state == SMC_ACTIVE)
1662			goto connected;
1663		break;
1664	case SS_UNCONNECTED:
1665		sock->state = SS_CONNECTING;
1666		break;
1667	}
1668
1669	switch (sk->sk_state) {
1670	default:
1671		goto out;
1672	case SMC_CLOSED:
1673		rc = sock_error(sk) ? : -ECONNABORTED;
1674		sock->state = SS_UNCONNECTED;
1675		goto out;
1676	case SMC_ACTIVE:
1677		rc = -EISCONN;
1678		goto out;
1679	case SMC_INIT:
1680		break;
1681	}
1682
1683	smc_copy_sock_settings_to_clc(smc);
1684	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1685	if (smc->connect_nonblock) {
1686		rc = -EALREADY;
1687		goto out;
1688	}
1689	rc = kernel_connect(smc->clcsock, addr, alen, flags);
1690	if (rc && rc != -EINPROGRESS)
1691		goto out;
1692
1693	if (smc->use_fallback) {
1694		sock->state = rc ? SS_CONNECTING : SS_CONNECTED;
1695		goto out;
1696	}
1697	sock_hold(&smc->sk); /* sock put in passive closing */
1698	if (flags & O_NONBLOCK) {
1699		if (queue_work(smc_hs_wq, &smc->connect_work))
1700			smc->connect_nonblock = 1;
1701		rc = -EINPROGRESS;
1702		goto out;
1703	} else {
1704		rc = __smc_connect(smc);
1705		if (rc < 0)
1706			goto out;
1707	}
1708
1709connected:
1710	rc = 0;
1711	sock->state = SS_CONNECTED;
1712out:
1713	release_sock(sk);
1714out_err:
1715	return rc;
1716}
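
     /* Illustrative userspace sketch (standard poll() usage; not part of
      * this file): a non-blocking connect on an AF_SMC socket returns
      * -EINPROGRESS while smc_connect_work() runs, and completion is
      * reported through the usual poll machinery:
      *
      *	fcntl(fd, F_SETFL, O_NONBLOCK);
      *	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
      *	    errno == EINPROGRESS) {
      *		struct pollfd p = { .fd = fd, .events = POLLOUT };
      *
      *		poll(&p, 1, -1);
      *	}
      *
      * The socket becomes writable once the handshake has finished;
      * SO_ERROR then reports success or failure exactly as for plain TCP.
      */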
1717
1718static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
1719{
1720	struct socket *new_clcsock = NULL;
1721	struct sock *lsk = &lsmc->sk;
1722	struct sock *new_sk;
1723	int rc = -EINVAL;
1724
1725	release_sock(lsk);
1726	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
1727	if (!new_sk) {
1728		rc = -ENOMEM;
1729		lsk->sk_err = ENOMEM;
1730		*new_smc = NULL;
1731		lock_sock(lsk);
1732		goto out;
1733	}
1734	*new_smc = smc_sk(new_sk);
1735
1736	mutex_lock(&lsmc->clcsock_release_lock);
1737	if (lsmc->clcsock)
1738		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
1739	mutex_unlock(&lsmc->clcsock_release_lock);
1740	lock_sock(lsk);
 1741	if (rc < 0 && rc != -EAGAIN)
1742		lsk->sk_err = -rc;
1743	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
1744		new_sk->sk_prot->unhash(new_sk);
1745		if (new_clcsock)
1746			sock_release(new_clcsock);
1747		new_sk->sk_state = SMC_CLOSED;
1748		smc_sock_set_flag(new_sk, SOCK_DEAD);
1749		sock_put(new_sk); /* final */
1750		*new_smc = NULL;
1751		goto out;
1752	}
1753
1754	/* new clcsock has inherited the smc listen-specific sk_data_ready
1755	 * function; switch it back to the original sk_data_ready function
1756	 */
1757	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
1758
1759	/* if new clcsock has also inherited the fallback-specific callback
1760	 * functions, switch them back to the original ones.
1761	 */
1762	if (lsmc->use_fallback) {
1763		if (lsmc->clcsk_state_change)
1764			new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
1765		if (lsmc->clcsk_write_space)
1766			new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
1767		if (lsmc->clcsk_error_report)
1768			new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
1769	}
1770
1771	(*new_smc)->clcsock = new_clcsock;
1772out:
1773	return rc;
1774}
1775
1776/* add a just created sock to the accept queue of the listen sock as
1777 * candidate for a following socket accept call from user space
1778 */
1779static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
1780{
1781	struct smc_sock *par = smc_sk(parent);
1782
 1783	sock_hold(sk); /* sock_put in smc_accept_unlink() */
1784	spin_lock(&par->accept_q_lock);
1785	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
1786	spin_unlock(&par->accept_q_lock);
1787	sk_acceptq_added(parent);
1788}
1789
1790/* remove a socket from the accept queue of its parental listening socket */
1791static void smc_accept_unlink(struct sock *sk)
1792{
1793	struct smc_sock *par = smc_sk(sk)->listen_smc;
1794
1795	spin_lock(&par->accept_q_lock);
1796	list_del_init(&smc_sk(sk)->accept_q);
1797	spin_unlock(&par->accept_q_lock);
1798	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
1799	sock_put(sk); /* sock_hold in smc_accept_enqueue */
1800}
1801
1802/* remove a sock from the accept queue to bind it to a new socket created
1803 * for a socket accept call from user space
1804 */
1805struct sock *smc_accept_dequeue(struct sock *parent,
1806				struct socket *new_sock)
1807{
1808	struct smc_sock *isk, *n;
1809	struct sock *new_sk;
1810
1811	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
1812		new_sk = (struct sock *)isk;
1813
1814		smc_accept_unlink(new_sk);
1815		if (new_sk->sk_state == SMC_CLOSED) {
1816			new_sk->sk_prot->unhash(new_sk);
1817			if (isk->clcsock) {
1818				sock_release(isk->clcsock);
1819				isk->clcsock = NULL;
1820			}
1821			sock_put(new_sk); /* final */
1822			continue;
1823		}
1824		if (new_sock) {
1825			sock_graft(new_sk, new_sock);
1826			new_sock->state = SS_CONNECTED;
1827			if (isk->use_fallback) {
1828				smc_sk(new_sk)->clcsock->file = new_sock->file;
1829				isk->clcsock->file->private_data = isk->clcsock;
1830			}
1831		}
1832		return new_sk;
1833	}
1834	return NULL;
1835}
1836
1837/* clean up for a created but never accepted sock */
1838void smc_close_non_accepted(struct sock *sk)
1839{
1840	struct smc_sock *smc = smc_sk(sk);
1841
1842	sock_hold(sk); /* sock_put below */
1843	lock_sock(sk);
1844	if (!sk->sk_lingertime)
1845		/* wait for peer closing */
1846		WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
1847	__smc_release(smc);
1848	release_sock(sk);
1849	sock_put(sk); /* sock_hold above */
1850	sock_put(sk); /* final sock_put */
1851}
1852
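/* server side of the first-contact CONFIRM LINK exchange: register the
 * connection's buffers with the link, send a CONFIRM LINK request over
 * the RoCE fabric, evaluate the client's response and, if the link group
 * allows it, try to establish a second link
 */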
1853static int smcr_serv_conf_first_link(struct smc_sock *smc)
1854{
1855	struct smc_link *link = smc->conn.lnk;
1856	struct smc_llc_qentry *qentry;
1857	int rc;
1858
1859	/* reg the sndbuf if it was vzalloced */
1860	if (smc->conn.sndbuf_desc->is_vm) {
1861		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
1862			return SMC_CLC_DECL_ERR_REGBUF;
1863	}
1864
1865	/* reg the rmb */
1866	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
1867		return SMC_CLC_DECL_ERR_REGBUF;
1868
1869	/* send CONFIRM LINK request to client over the RoCE fabric */
1870	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
1871	if (rc < 0)
1872		return SMC_CLC_DECL_TIMEOUT_CL;
1873
1874	/* receive CONFIRM LINK response from client over the RoCE fabric */
1875	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
1876			      SMC_LLC_CONFIRM_LINK);
1877	if (!qentry) {
1878		struct smc_clc_msg_decline dclc;
1879
1880		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1881				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1882		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1883	}
1884	smc_llc_save_peer_uid(qentry);
1885	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
1886	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
1887	if (rc)
1888		return SMC_CLC_DECL_RMBE_EC;
1889
1890	/* confirm_rkey is implicit on 1st contact */
1891	smc->conn.rmb_desc->is_conf_rkey = true;
1892
1893	smc_llc_link_active(link);
1894	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
1895
1896	if (link->lgr->max_links > 1) {
1897		down_write(&link->lgr->llc_conf_mutex);
1898		/* initial contact - try to establish second link */
1899		smc_llc_srv_add_link(link, NULL);
1900		up_write(&link->lgr->llc_conf_mutex);
1901	}
1902	return 0;
1903}
1904
1905/* listen worker: finish */
1906static void smc_listen_out(struct smc_sock *new_smc)
1907{
1908	struct smc_sock *lsmc = new_smc->listen_smc;
1909	struct sock *newsmcsk = &new_smc->sk;
1910
1911	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
1912		atomic_dec(&lsmc->queued_smc_hs);
1913
1914	if (lsmc->sk.sk_state == SMC_LISTEN) {
1915		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1916		smc_accept_enqueue(&lsmc->sk, newsmcsk);
1917		release_sock(&lsmc->sk);
1918	} else { /* no longer listening */
1919		smc_close_non_accepted(newsmcsk);
1920	}
1921
1922	/* Wake up accept */
1923	lsmc->sk.sk_data_ready(&lsmc->sk);
1924	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1925}
1926
1927/* listen worker: finish in state connected */
1928static void smc_listen_out_connected(struct smc_sock *new_smc)
1929{
1930	struct sock *newsmcsk = &new_smc->sk;
1931
1932	if (newsmcsk->sk_state == SMC_INIT)
1933		newsmcsk->sk_state = SMC_ACTIVE;
1934
1935	smc_listen_out(new_smc);
1936}
1937
1938/* listen worker: finish in error state */
1939static void smc_listen_out_err(struct smc_sock *new_smc)
1940{
1941	struct sock *newsmcsk = &new_smc->sk;
1942	struct net *net = sock_net(newsmcsk);
1943
1944	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
1945	if (newsmcsk->sk_state == SMC_INIT)
1946		sock_put(&new_smc->sk); /* passive closing */
1947	newsmcsk->sk_state = SMC_CLOSED;
1948
1949	smc_listen_out(new_smc);
1950}
1951
1952/* listen worker: decline and fall back if possible */
1953static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1954			       int local_first, u8 version)
1955{
1956	/* RDMA setup failed, switch back to TCP */
1957	smc_conn_abort(new_smc, local_first);
1958	if (reason_code < 0 ||
1959	    smc_switch_to_fallback(new_smc, reason_code)) {
1960		/* error, no fallback possible */
1961		smc_listen_out_err(new_smc);
1962		return;
1963	}
1964	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1965		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
1966			smc_listen_out_err(new_smc);
1967			return;
1968		}
1969	}
1970	smc_listen_out_connected(new_smc);
1971}
1972
1973/* listen worker: version checking */
1974static int smc_listen_v2_check(struct smc_sock *new_smc,
1975			       struct smc_clc_msg_proposal *pclc,
1976			       struct smc_init_info *ini)
1977{
1978	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
1979	struct smc_clc_v2_extension *pclc_v2_ext;
1980	int rc = SMC_CLC_DECL_PEERNOSMC;
1981
1982	ini->smc_type_v1 = pclc->hdr.typev1;
1983	ini->smc_type_v2 = pclc->hdr.typev2;
1984	ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
1985	ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
1986	if (pclc->hdr.version > SMC_V1) {
1987		if (smcd_indicated(ini->smc_type_v2))
1988			ini->smcd_version |= SMC_V2;
1989		if (smcr_indicated(ini->smc_type_v2))
1990			ini->smcr_version |= SMC_V2;
1991	}
1992	if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
1993		rc = SMC_CLC_DECL_PEERNOSMC;
1994		goto out;
1995	}
1996	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
1997	if (!pclc_v2_ext) {
1998		ini->smcd_version &= ~SMC_V2;
1999		ini->smcr_version &= ~SMC_V2;
2000		rc = SMC_CLC_DECL_NOV2EXT;
2001		goto out;
2002	}
2003	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
2004	if (ini->smcd_version & SMC_V2) {
2005		if (!smc_ism_is_v2_capable()) {
2006			ini->smcd_version &= ~SMC_V2;
2007			rc = SMC_CLC_DECL_NOISM2SUPP;
2008		} else if (!pclc_smcd_v2_ext) {
2009			ini->smcd_version &= ~SMC_V2;
2010			rc = SMC_CLC_DECL_NOV2DEXT;
2011		} else if (!pclc_v2_ext->hdr.eid_cnt &&
2012			   !pclc_v2_ext->hdr.flag.seid) {
2013			ini->smcd_version &= ~SMC_V2;
2014			rc = SMC_CLC_DECL_NOUEID;
2015		}
2016	}
2017	if (ini->smcr_version & SMC_V2) {
2018		if (!pclc_v2_ext->hdr.eid_cnt) {
2019			ini->smcr_version &= ~SMC_V2;
2020			rc = SMC_CLC_DECL_NOUEID;
2021		}
2022	}
2023
2024	ini->release_nr = pclc_v2_ext->hdr.flag.release;
2025	if (pclc_v2_ext->hdr.flag.release > SMC_RELEASE)
2026		ini->release_nr = SMC_RELEASE;
2027
2028out:
2029	if (!ini->smcd_version && !ini->smcr_version)
2030		return rc;
2031
2032	return 0;
2033}
2034
2035/* listen worker: check prefixes */
2036static int smc_listen_prfx_check(struct smc_sock *new_smc,
2037				 struct smc_clc_msg_proposal *pclc)
2038{
2039	struct smc_clc_msg_proposal_prefix *pclc_prfx;
2040	struct socket *newclcsock = new_smc->clcsock;
2041
2042	if (pclc->hdr.typev1 == SMC_TYPE_N)
2043		return 0;
2044	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
2045	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
2046		return SMC_CLC_DECL_DIFFPREFIX;
2047
2048	return 0;
2049}
2050
2051/* listen worker: initialize connection and buffers */
2052static int smc_listen_rdma_init(struct smc_sock *new_smc,
2053				struct smc_init_info *ini)
2054{
2055	int rc;
2056
2057	/* allocate connection / link group */
2058	rc = smc_conn_create(new_smc, ini);
2059	if (rc)
2060		return rc;
2061
2062	/* create send buffer and rmb */
2063	if (smc_buf_create(new_smc, false)) {
2064		smc_conn_abort(new_smc, ini->first_contact_local);
2065		return SMC_CLC_DECL_MEM;
2066	}
2067
2068	return 0;
2069}
2070
2071/* listen worker: initialize connection and buffers for SMC-D */
2072static int smc_listen_ism_init(struct smc_sock *new_smc,
2073			       struct smc_init_info *ini)
2074{
2075	int rc;
2076
2077	rc = smc_conn_create(new_smc, ini);
2078	if (rc)
2079		return rc;
2080
2081	/* Create send and receive buffers */
2082	rc = smc_buf_create(new_smc, true);
2083	if (rc) {
2084		smc_conn_abort(new_smc, ini->first_contact_local);
2085		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
2086					 SMC_CLC_DECL_MEM;
2087	}
2088
2089	return 0;
2090}
2091
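/* check whether an ISM device has already been selected as a match */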
2092static bool smc_is_already_selected(struct smcd_dev *smcd,
2093				    struct smc_init_info *ini,
2094				    int matches)
2095{
2096	int i;
2097
2098	for (i = 0; i < matches; i++)
2099		if (smcd == ini->ism_dev[i])
2100			return true;
2101
2102	return false;
2103}
2104
2105/* check for ISM devices matching proposed ISM devices */
2106static void smc_check_ism_v2_match(struct smc_init_info *ini,
2107				   u16 proposed_chid,
2108				   struct smcd_gid *proposed_gid,
2109				   unsigned int *matches)
2110{
2111	struct smcd_dev *smcd;
2112
2113	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
2114		if (smcd->going_away)
2115			continue;
2116		if (smc_is_already_selected(smcd, ini, *matches))
2117			continue;
2118		if (smc_ism_get_chid(smcd) == proposed_chid &&
2119		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
2120			ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
2121			if (__smc_ism_is_virtual(proposed_chid))
2122				ini->ism_peer_gid[*matches].gid_ext =
2123							proposed_gid->gid_ext;
2124				/* non-virtual ISM's peer gid_ext remains 0. */
2125			ini->ism_dev[*matches] = smcd;
2126			(*matches)++;
2127			break;
2128		}
2129	}
2130}
2131
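/* store a reason code unless one was stored before; the first failure
 * is kept as the decisive one
 */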
2132static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
2133{
2134	if (!ini->rc)
2135		ini->rc = rc;
2136}
2137
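/* listen worker: select a matching and usable V2 ISM device, if any;
 * on failure, clear SMC_V2 from ini->smcd_version and store the reason
 * code in ini->rc
 */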
2138static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
2139					struct smc_clc_msg_proposal *pclc,
2140					struct smc_init_info *ini)
2141{
2142	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
2143	struct smc_clc_v2_extension *smc_v2_ext;
2144	struct smc_clc_msg_smcd *pclc_smcd;
2145	unsigned int matches = 0;
2146	struct smcd_gid smcd_gid;
2147	u8 smcd_version;
2148	u8 *eid = NULL;
2149	int i, rc;
2150	u16 chid;
2151
2152	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
2153		goto not_found;
2154
2155	pclc_smcd = smc_get_clc_msg_smcd(pclc);
2156	smc_v2_ext = smc_get_clc_v2_ext(pclc);
2157	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
2158
2159	mutex_lock(&smcd_dev_list.mutex);
2160	if (pclc_smcd->ism.chid) {
2161		/* check for ISM device matching proposed native ISM device */
2162		smcd_gid.gid = ntohll(pclc_smcd->ism.gid);
2163		smcd_gid.gid_ext = 0;
2164		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
2165				       &smcd_gid, &matches);
2166	}
2167	for (i = 0; i < smc_v2_ext->hdr.ism_gid_cnt; i++) {
2168		/* check for ISM devices matching proposed non-native ISM
2169		 * devices
2170		 */
2171		smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
2172		smcd_gid.gid_ext = 0;
2173		chid = ntohs(smcd_v2_ext->gidchid[i].chid);
2174		if (__smc_ism_is_virtual(chid)) {
2175			if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
2176			    chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
2177				/* each virtual ISM device takes two GID-CHID
2178				 * entries, and the CHID of the second entry
2179				 * repeats that of the first entry.
2180				 *
2181				 * So check that the next GID-CHID entry exists
2182				 * and that both entries' CHIDs are the same.
2183				 */
2184				continue;
2185			smcd_gid.gid_ext =
2186				ntohll(smcd_v2_ext->gidchid[++i].gid);
2187		}
2188		smc_check_ism_v2_match(ini, chid, &smcd_gid, &matches);
2189	}
2190	mutex_unlock(&smcd_dev_list.mutex);
2191
2192	if (!ini->ism_dev[0]) {
2193		smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
2194		goto not_found;
2195	}
2196
2197	smc_ism_get_system_eid(&eid);
2198	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
2199			       smcd_v2_ext->system_eid, eid))
2200		goto not_found;
2201
2202	/* separate - outside the smcd_dev_list.lock */
2203	smcd_version = ini->smcd_version;
2204	for (i = 0; i < matches; i++) {
2205		ini->smcd_version = SMC_V2;
2206		ini->is_smcd = true;
2207		ini->ism_selected = i;
2208		rc = smc_listen_ism_init(new_smc, ini);
2209		if (rc) {
2210			smc_find_ism_store_rc(rc, ini);
2211			/* try next active ISM device */
2212			continue;
2213		}
2214		return; /* matching and usable V2 ISM device found */
2215	}
2216	/* no V2 ISM device could be initialized */
2217	ini->smcd_version = smcd_version;	/* restore original value */
2218	ini->negotiated_eid[0] = 0;
2219
2220not_found:
2221	ini->smcd_version &= ~SMC_V2;
2222	ini->ism_dev[0] = NULL;
2223	ini->is_smcd = false;
2224}
2225
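/* listen worker: select a matching and usable V1 ISM device, if any;
 * on failure, clear SMC_V1 from ini->smcd_version
 */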
2226static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
2227					struct smc_clc_msg_proposal *pclc,
2228					struct smc_init_info *ini)
2229{
2230	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
2231	int rc = 0;
2232
2233	/* check if ISM V1 is available */
2234	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
2235		goto not_found;
2236	ini->is_smcd = true; /* prepare ISM check */
2237	ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
2238	ini->ism_peer_gid[0].gid_ext = 0;
2239	rc = smc_find_ism_device(new_smc, ini);
2240	if (rc)
2241		goto not_found;
2242	ini->ism_selected = 0;
2243	rc = smc_listen_ism_init(new_smc, ini);
2244	if (!rc)
2245		return;		/* V1 ISM device found */
2246
2247not_found:
2248	smc_find_ism_store_rc(rc, ini);
2249	ini->smcd_version &= ~SMC_V1;
2250	ini->ism_dev[0] = NULL;
2251	ini->is_smcd = false;
2252}
2253
2254/* listen worker: register buffers */
2255static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
2256{
2257	struct smc_connection *conn = &new_smc->conn;
2258
2259	if (!local_first) {
2260		/* reg sendbufs if they were vzalloced */
2261		if (conn->sndbuf_desc->is_vm) {
2262			if (smcr_lgr_reg_sndbufs(conn->lnk,
2263						 conn->sndbuf_desc))
2264				return SMC_CLC_DECL_ERR_REGBUF;
2265		}
2266		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
2267			return SMC_CLC_DECL_ERR_REGBUF;
2268	}
2269
2270	return 0;
2271}
2272
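/* listen worker: try to prepare an SMC-Rv2 connection for the proposal;
 * on failure, clear SMC_V2 from ini->smcr_version and store the reason
 * code in ini->rc
 */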
2273static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
2274					 struct smc_clc_msg_proposal *pclc,
2275					 struct smc_init_info *ini)
2276{
2277	struct smc_clc_v2_extension *smc_v2_ext;
2278	u8 smcr_version;
2279	int rc;
2280
2281	if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
2282		goto not_found;
2283
2284	smc_v2_ext = smc_get_clc_v2_ext(pclc);
2285	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
2286		goto not_found;
2287
2288	/* prepare RDMA check */
2289	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
2290	memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
2291	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
2292	ini->check_smcrv2 = true;
2293	ini->smcrv2.clc_sk = new_smc->clcsock->sk;
2294	ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
2295	ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
2296	rc = smc_find_rdma_device(new_smc, ini);
2297	if (rc) {
2298		smc_find_ism_store_rc(rc, ini);
2299		goto not_found;
2300	}
2301	if (!ini->smcrv2.uses_gateway)
2302		memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);
2303
2304	smcr_version = ini->smcr_version;
2305	ini->smcr_version = SMC_V2;
2306	rc = smc_listen_rdma_init(new_smc, ini);
2307	if (!rc) {
2308		rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
2309		if (rc)
2310			smc_conn_abort(new_smc, ini->first_contact_local);
2311	}
2312	if (!rc)
2313		return;
2314	ini->smcr_version = smcr_version;
2315	smc_find_ism_store_rc(rc, ini);
2316
2317not_found:
2318	ini->smcr_version &= ~SMC_V2;
2319	ini->smcrv2.ib_dev_v2 = NULL;
2320	ini->check_smcrv2 = false;
2321}
2322
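/* listen worker: try to prepare an SMC-Rv1 connection for the proposal;
 * returns 0 on success or a CLC decline reason code
 */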
2323static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
2324					struct smc_clc_msg_proposal *pclc,
2325					struct smc_init_info *ini)
2326{
2327	int rc;
2328
2329	if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
2330		return SMC_CLC_DECL_NOSMCDEV;
2331
2332	/* prepare RDMA check */
2333	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
2334	memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
2335	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
2336	rc = smc_find_rdma_device(new_smc, ini);
2337	if (rc) {
2338		/* no RDMA device found */
2339		return SMC_CLC_DECL_NOSMCDEV;
2340	}
2341	rc = smc_listen_rdma_init(new_smc, ini);
2342	if (rc)
2343		return rc;
2344	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
2345}
2346
2347/* determine the local device matching the proposal */
2348static int smc_listen_find_device(struct smc_sock *new_smc,
2349				  struct smc_clc_msg_proposal *pclc,
2350				  struct smc_init_info *ini)
2351{
2352	int prfx_rc;
2353
2354	/* check for ISM device matching V2 proposed device */
2355	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
2356	if (ini->ism_dev[0])
2357		return 0;
2358
2359	/* check for matching IP prefix and subnet length (V1) */
2360	prfx_rc = smc_listen_prfx_check(new_smc, pclc);
2361	if (prfx_rc)
2362		smc_find_ism_store_rc(prfx_rc, ini);
2363
2364	/* get vlan id from IP device */
2365	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
2366		return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
2367
2368	/* check for ISM device matching V1 proposed device */
2369	if (!prfx_rc)
2370		smc_find_ism_v1_device_serv(new_smc, pclc, ini);
2371	if (ini->ism_dev[0])
2372		return 0;
2373
2374	if (!smcr_indicated(pclc->hdr.typev1) &&
2375	    !smcr_indicated(pclc->hdr.typev2))
2376		/* skip RDMA and decline */
2377		return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
2378
2379	/* check if RDMA V2 is available */
2380	smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
2381	if (ini->smcrv2.ib_dev_v2)
2382		return 0;
2383
2384	/* check if RDMA V1 is available */
2385	if (!prfx_rc) {
2386		int rc;
2387
2388		rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
2389		smc_find_ism_store_rc(rc, ini);
2390		return (!rc) ? 0 : ini->rc;
2391	}
2392	return prfx_rc;
2393}
2394
2395/* listen worker: finish RDMA setup */
2396static int smc_listen_rdma_finish(struct smc_sock *new_smc,
2397				  struct smc_clc_msg_accept_confirm *cclc,
2398				  bool local_first,
2399				  struct smc_init_info *ini)
2400{
2401	struct smc_link *link = new_smc->conn.lnk;
2402	int reason_code = 0;
2403
2404	if (local_first)
2405		smc_link_save_peer_info(link, cclc, ini);
2406
2407	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
2408		return SMC_CLC_DECL_ERR_RTOK;
2409
2410	if (local_first) {
2411		if (smc_ib_ready_link(link))
2412			return SMC_CLC_DECL_ERR_RDYLNK;
2413		/* QP confirmation over RoCE fabric */
2414		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
2415		reason_code = smcr_serv_conf_first_link(new_smc);
2416		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
2417	}
2418	return reason_code;
2419}
2420
2421/* listen worker: set up the server side of a new connection */
2422static void smc_listen_work(struct work_struct *work)
2423{
2424	struct smc_sock *new_smc = container_of(work, struct smc_sock,
2425						smc_listen_work);
2426	struct socket *newclcsock = new_smc->clcsock;
2427	struct smc_clc_msg_accept_confirm *cclc;
2428	struct smc_clc_msg_proposal_area *buf;
2429	struct smc_clc_msg_proposal *pclc;
2430	struct smc_init_info *ini = NULL;
2431	u8 proposal_version = SMC_V1;
2432	u8 accept_version;
2433	int rc = 0;
2434
2435	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
2436		return smc_listen_out_err(new_smc);
2437
2438	if (new_smc->use_fallback) {
2439		smc_listen_out_connected(new_smc);
2440		return;
2441	}
2442
2443	/* check if peer is smc capable */
2444	if (!tcp_sk(newclcsock->sk)->syn_smc) {
2445		rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
2446		if (rc)
2447			smc_listen_out_err(new_smc);
2448		else
2449			smc_listen_out_connected(new_smc);
2450		return;
2451	}
2452
2453	/* do inband token exchange -
2454	 * wait for and receive SMC Proposal CLC message
2455	 */
2456	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
2457	if (!buf) {
2458		rc = SMC_CLC_DECL_MEM;
2459		goto out_decl;
2460	}
2461	pclc = (struct smc_clc_msg_proposal *)buf;
2462	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
2463			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
2464	if (rc)
2465		goto out_decl;
2466
2467	if (pclc->hdr.version > SMC_V1)
2468		proposal_version = SMC_V2;
2469
2470	/* IPSec connections opt out of SMC optimizations */
2471	if (using_ipsec(new_smc)) {
2472		rc = SMC_CLC_DECL_IPSEC;
2473		goto out_decl;
2474	}
2475
2476	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
2477	if (!ini) {
2478		rc = SMC_CLC_DECL_MEM;
2479		goto out_decl;
2480	}
2481
2482	/* initial version checking */
2483	rc = smc_listen_v2_check(new_smc, pclc, ini);
2484	if (rc)
2485		goto out_decl;
2486
2487	rc = smc_clc_srv_v2x_features_validate(new_smc, pclc, ini);
2488	if (rc)
2489		goto out_decl;
2490
2491	mutex_lock(&smc_server_lgr_pending);
2492	smc_close_init(new_smc);
2493	smc_rx_init(new_smc);
2494	smc_tx_init(new_smc);
2495
2496	/* determine ISM or RoCE device used for connection */
2497	rc = smc_listen_find_device(new_smc, pclc, ini);
2498	if (rc)
2499		goto out_unlock;
2500
2501	/* send SMC Accept CLC message */
2502	accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
2503	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
2504				 accept_version, ini->negotiated_eid, ini);
2505	if (rc)
2506		goto out_unlock;
2507
2508	/* SMC-D does not need this lock any more */
2509	if (ini->is_smcd)
2510		mutex_unlock(&smc_server_lgr_pending);
2511
2512	/* receive SMC Confirm CLC message */
2513	memset(buf, 0, sizeof(*buf));
2514	cclc = (struct smc_clc_msg_accept_confirm *)buf;
2515	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
2516			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
2517	if (rc) {
2518		if (!ini->is_smcd)
2519			goto out_unlock;
2520		goto out_decl;
2521	}
2522
2523	rc = smc_clc_v2x_features_confirm_check(cclc, ini);
2524	if (rc) {
2525		if (!ini->is_smcd)
2526			goto out_unlock;
2527		goto out_decl;
2528	}
2529
2530	/* the SMC release version from the FCE is needed in
2531	 * smc_listen_rdma_finish(), so save the FCE info here.
2532	 */
2533	smc_conn_save_peer_info_fce(new_smc, cclc);
2534
2535	/* finish worker */
2536	if (!ini->is_smcd) {
2537		rc = smc_listen_rdma_finish(new_smc, cclc,
2538					    ini->first_contact_local, ini);
2539		if (rc)
2540			goto out_unlock;
2541		mutex_unlock(&smc_server_lgr_pending);
2542	}
2543	smc_conn_save_peer_info(new_smc, cclc);
2544	smc_listen_out_connected(new_smc);
2545	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
2546	goto out_free;
2547
2548out_unlock:
2549	mutex_unlock(&smc_server_lgr_pending);
2550out_decl:
2551	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
2552			   proposal_version);
2553out_free:
2554	kfree(ini);
2555	kfree(buf);
2556}
2557
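/* worker for the listening clcsock: accept new clcsock connections as
 * long as the smc socket is listening, and queue a handshake worker for
 * each of them
 */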
2558static void smc_tcp_listen_work(struct work_struct *work)
2559{
2560	struct smc_sock *lsmc = container_of(work, struct smc_sock,
2561					     tcp_listen_work);
2562	struct sock *lsk = &lsmc->sk;
2563	struct smc_sock *new_smc;
2564	int rc = 0;
2565
2566	lock_sock(lsk);
2567	while (lsk->sk_state == SMC_LISTEN) {
2568		rc = smc_clcsock_accept(lsmc, &new_smc);
2569		if (rc) /* clcsock accept queue empty or error */
2570			goto out;
2571		if (!new_smc)
2572			continue;
2573
2574		if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
2575			atomic_inc(&lsmc->queued_smc_hs);
2576
2577		new_smc->listen_smc = lsmc;
2578		new_smc->use_fallback = lsmc->use_fallback;
2579		new_smc->fallback_rsn = lsmc->fallback_rsn;
2580		sock_hold(lsk); /* sock_put in smc_listen_work */
2581		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
2582		smc_copy_sock_settings_to_smc(new_smc);
2583		sock_hold(&new_smc->sk); /* sock_put in passive closing */
2584		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
2585			sock_put(&new_smc->sk);
2586	}
2587
2588out:
2589	release_sock(lsk);
2590	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
2591}
2592
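/* sk_data_ready callback of the listening clcsock: run the original
 * callback, then schedule the tcp listen worker
 */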
2593static void smc_clcsock_data_ready(struct sock *listen_clcsock)
2594{
2595	struct smc_sock *lsmc;
2596
2597	read_lock_bh(&listen_clcsock->sk_callback_lock);
2598	lsmc = smc_clcsock_user_data(listen_clcsock);
2599	if (!lsmc)
2600		goto out;
2601	lsmc->clcsk_data_ready(listen_clcsock);
2602	if (lsmc->sk.sk_state == SMC_LISTEN) {
2603		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
2604		if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
2605			sock_put(&lsmc->sk);
2606	}
2607out:
2608	read_unlock_bh(&listen_clcsock->sk_callback_lock);
2609}
2610
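/* move an smc socket into the listen state: copy the socket options to
 * the clcsock, install the smc-specific sk_data_ready and syn_recv_sock
 * callbacks, and listen on the internal clcsock
 */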
2611static int smc_listen(struct socket *sock, int backlog)
2612{
2613	struct sock *sk = sock->sk;
2614	struct smc_sock *smc;
2615	int rc;
2616
2617	smc = smc_sk(sk);
2618	lock_sock(sk);
2619
2620	rc = -EINVAL;
2621	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
2622	    smc->connect_nonblock || sock->state != SS_UNCONNECTED)
2623		goto out;
2624
2625	rc = 0;
2626	if (sk->sk_state == SMC_LISTEN) {
2627		sk->sk_max_ack_backlog = backlog;
2628		goto out;
2629	}
2630	/* some socket options are handled in core, so we cannot apply
2631	 * them to the clc socket -- copy smc socket options to clc socket
2632	 */
2633	smc_copy_sock_settings_to_clc(smc);
2634	if (!smc->use_fallback)
2635		tcp_sk(smc->clcsock->sk)->syn_smc = 1;
2636
2637	/* save original sk_data_ready function and establish
2638	 * smc-specific sk_data_ready function
2639	 */
2640	write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
2641	smc->clcsock->sk->sk_user_data =
2642		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
2643	smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
2644			       smc_clcsock_data_ready, &smc->clcsk_data_ready);
2645	write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
2646
2647	/* save original ops */
2648	smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
2649
2650	smc->af_ops = *smc->ori_af_ops;
2651	smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
2652
2653	inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
2654
2655	if (smc->limit_smc_hs)
2656		tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
2657
2658	rc = kernel_listen(smc->clcsock, backlog);
2659	if (rc) {
2660		write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
2661		smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
2662				       &smc->clcsk_data_ready);
2663		smc->clcsock->sk->sk_user_data = NULL;
2664		write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
2665		goto out;
2666	}
2667	sk->sk_max_ack_backlog = backlog;
2668	sk->sk_ack_backlog = 0;
2669	sk->sk_state = SMC_LISTEN;
2670
2671out:
2672	release_sock(sk);
2673	return rc;
2674}
2675
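/* wait for a new connection on the accept queue filled by the listen
 * worker; with TCP_DEFER_ACCEPT set, additionally wait until data has
 * arrived on the new socket
 */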
2676static int smc_accept(struct socket *sock, struct socket *new_sock,
2677		      int flags, bool kern)
2678{
2679	struct sock *sk = sock->sk, *nsk;
2680	DECLARE_WAITQUEUE(wait, current);
2681	struct smc_sock *lsmc;
2682	long timeo;
2683	int rc = 0;
2684
2685	lsmc = smc_sk(sk);
2686	sock_hold(sk); /* sock_put below */
2687	lock_sock(sk);
2688
2689	if (lsmc->sk.sk_state != SMC_LISTEN) {
2690		rc = -EINVAL;
2691		release_sock(sk);
2692		goto out;
2693	}
2694
2695	/* Wait for an incoming connection */
2696	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2697	add_wait_queue_exclusive(sk_sleep(sk), &wait);
2698	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
2699		set_current_state(TASK_INTERRUPTIBLE);
2700		if (!timeo) {
2701			rc = -EAGAIN;
2702			break;
2703		}
2704		release_sock(sk);
2705		timeo = schedule_timeout(timeo);
2706		/* wakeup by sk_data_ready in smc_listen_work() */
2707		sched_annotate_sleep();
2708		lock_sock(sk);
2709		if (signal_pending(current)) {
2710			rc = sock_intr_errno(timeo);
2711			break;
2712		}
2713	}
2714	set_current_state(TASK_RUNNING);
2715	remove_wait_queue(sk_sleep(sk), &wait);
2716
2717	if (!rc)
2718		rc = sock_error(nsk);
2719	release_sock(sk);
2720	if (rc)
2721		goto out;
2722
2723	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
2724		/* wait till data arrives on the socket */
2725		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
2726								MSEC_PER_SEC);
2727		if (smc_sk(nsk)->use_fallback) {
2728			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
2729
2730			lock_sock(clcsk);
2731			if (skb_queue_empty(&clcsk->sk_receive_queue))
2732				sk_wait_data(clcsk, &timeo, NULL);
2733			release_sock(clcsk);
2734		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
2735			lock_sock(nsk);
2736			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
2737			release_sock(nsk);
2738		}
2739	}
2740
2741out:
2742	sock_put(sk); /* sock_hold above */
2743	return rc;
2744}
2745
2746static int smc_getname(struct socket *sock, struct sockaddr *addr,
2747		       int peer)
2748{
2749	struct smc_sock *smc;
2750
2751	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
2752	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
2753		return -ENOTCONN;
2754
2755	smc = smc_sk(sock->sk);
2756
2757	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
2758}
2759
2760static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2761{
2762	struct sock *sk = sock->sk;
2763	struct smc_sock *smc;
2764	int rc;
2765
2766	smc = smc_sk(sk);
2767	lock_sock(sk);
2768
2769	/* SMC does not support connect with fastopen */
2770	if (msg->msg_flags & MSG_FASTOPEN) {
2771		/* not connected yet, fallback */
2772		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2773			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2774			if (rc)
2775				goto out;
2776		} else {
2777			rc = -EINVAL;
2778			goto out;
2779		}
2780	} else if ((sk->sk_state != SMC_ACTIVE) &&
2781		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2782		   (sk->sk_state != SMC_INIT)) {
2783		rc = -EPIPE;
2784		goto out;
2785	}
2786
2787	if (smc->use_fallback) {
2788		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2789	} else {
2790		rc = smc_tx_sendmsg(smc, msg, len);
2791		SMC_STAT_TX_PAYLOAD(smc, len, rc);
2792	}
2793out:
2794	release_sock(sk);
2795	return rc;
2796}
2797
2798static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2799		       int flags)
2800{
2801	struct sock *sk = sock->sk;
2802	struct smc_sock *smc;
2803	int rc = -ENOTCONN;
2804
2805	smc = smc_sk(sk);
2806	lock_sock(sk);
2807	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2808		/* socket was connected before, no more data to read */
2809		rc = 0;
2810		goto out;
2811	}
2812	if ((sk->sk_state == SMC_INIT) ||
2813	    (sk->sk_state == SMC_LISTEN) ||
2814	    (sk->sk_state == SMC_CLOSED))
2815		goto out;
2816
2817	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2818		rc = 0;
2819		goto out;
2820	}
2821
2822	if (smc->use_fallback) {
2823		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2824	} else {
2825		msg->msg_namelen = 0;
2826		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2827		SMC_STAT_RX_PAYLOAD(smc, rc, rc);
2828	}
2829
2830out:
2831	release_sock(sk);
2832	return rc;
2833}
2834
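/* a listening smc socket is readable as soon as its accept queue is
 * non-empty
 */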
2835static __poll_t smc_accept_poll(struct sock *parent)
2836{
2837	struct smc_sock *isk = smc_sk(parent);
2838	__poll_t mask = 0;
2839
2840	spin_lock(&isk->accept_q_lock);
2841	if (!list_empty(&isk->accept_q))
2842		mask = EPOLLIN | EPOLLRDNORM;
2843	spin_unlock(&isk->accept_q_lock);
2844
2845	return mask;
2846}
2847
2848static __poll_t smc_poll(struct file *file, struct socket *sock,
2849			     poll_table *wait)
2850{
2851	struct sock *sk = sock->sk;
2852	struct smc_sock *smc;
2853	__poll_t mask = 0;
2854
2855	if (!sk)
2856		return EPOLLNVAL;
2857
2858	smc = smc_sk(sock->sk);
2859	if (smc->use_fallback) {
2860		/* delegate to CLC child sock */
2861		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2862		sk->sk_err = smc->clcsock->sk->sk_err;
2863	} else {
2864		if (sk->sk_state != SMC_CLOSED)
2865			sock_poll_wait(file, sock, wait);
2866		if (sk->sk_err)
2867			mask |= EPOLLERR;
2868		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
2869		    (sk->sk_state == SMC_CLOSED))
2870			mask |= EPOLLHUP;
2871		if (sk->sk_state == SMC_LISTEN) {
2872			/* woken up by sk_data_ready in smc_listen_work() */
2873			mask |= smc_accept_poll(sk);
2874		} else if (smc->use_fallback) { /* as result of connect_work() */
2875			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2876							   wait);
2877			sk->sk_err = smc->clcsock->sk->sk_err;
2878		} else {
2879			if ((sk->sk_state != SMC_INIT &&
2880			     atomic_read(&smc->conn.sndbuf_space)) ||
2881			    sk->sk_shutdown & SEND_SHUTDOWN) {
2882				mask |= EPOLLOUT | EPOLLWRNORM;
2883			} else {
2884				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2885				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2886			}
2887			if (atomic_read(&smc->conn.bytes_to_rcv))
2888				mask |= EPOLLIN | EPOLLRDNORM;
2889			if (sk->sk_shutdown & RCV_SHUTDOWN)
2890				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2891			if (sk->sk_state == SMC_APPCLOSEWAIT1)
2892				mask |= EPOLLIN;
2893			if (smc->conn.urg_state == SMC_URG_VALID)
2894				mask |= EPOLLPRI;
2895		}
2896	}
2897
2898	return mask;
2899}
2900
2901static int smc_shutdown(struct socket *sock, int how)
2902{
2903	struct sock *sk = sock->sk;
2904	bool do_shutdown = true;
2905	struct smc_sock *smc;
2906	int rc = -EINVAL;
2907	int old_state;
2908	int rc1 = 0;
2909
2910	smc = smc_sk(sk);
2911
2912	if ((how < SHUT_RD) || (how > SHUT_RDWR))
2913		return rc;
2914
2915	lock_sock(sk);
2916
2917	if (sock->state == SS_CONNECTING) {
2918		if (sk->sk_state == SMC_ACTIVE)
2919			sock->state = SS_CONNECTED;
2920		else if (sk->sk_state == SMC_PEERCLOSEWAIT1 ||
2921			 sk->sk_state == SMC_PEERCLOSEWAIT2 ||
2922			 sk->sk_state == SMC_APPCLOSEWAIT1 ||
2923			 sk->sk_state == SMC_APPCLOSEWAIT2 ||
2924			 sk->sk_state == SMC_APPFINCLOSEWAIT)
2925			sock->state = SS_DISCONNECTING;
2926	}
2927
2928	rc = -ENOTCONN;
2929	if ((sk->sk_state != SMC_ACTIVE) &&
2930	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
2931	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
2932	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2933	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
2934	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
2935		goto out;
2936	if (smc->use_fallback) {
2937		rc = kernel_sock_shutdown(smc->clcsock, how);
2938		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2939		if (sk->sk_shutdown == SHUTDOWN_MASK) {
2940			sk->sk_state = SMC_CLOSED;
2941			sk->sk_socket->state = SS_UNCONNECTED;
2942			sock_put(sk);
2943		}
2944		goto out;
2945	}
2946	switch (how) {
2947	case SHUT_RDWR:		/* shutdown in both directions */
2948		old_state = sk->sk_state;
2949		rc = smc_close_active(smc);
2950		if (old_state == SMC_ACTIVE &&
2951		    sk->sk_state == SMC_PEERCLOSEWAIT1)
2952			do_shutdown = false;
2953		break;
2954	case SHUT_WR:
2955		rc = smc_close_shutdown_write(smc);
2956		break;
2957	case SHUT_RD:
2958		rc = 0;
2959		/* nothing more to do because peer is not involved */
2960		break;
2961	}
2962	if (do_shutdown && smc->clcsock)
2963		rc1 = kernel_sock_shutdown(smc->clcsock, how);
2964	/* map sock_shutdown_cmd constants to sk_shutdown value range */
2965	sk->sk_shutdown |= how + 1;
2966
2967	if (sk->sk_state == SMC_CLOSED)
2968		sock->state = SS_UNCONNECTED;
2969	else
2970		sock->state = SS_DISCONNECTING;
2971out:
2972	release_sock(sk);
2973	return rc ? rc : rc1;
2974}
2975
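/* handle getsockopt() for the SOL_SMC level, i.e. for options
 * implemented by smc itself
 */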
2976static int __smc_getsockopt(struct socket *sock, int level, int optname,
2977			    char __user *optval, int __user *optlen)
2978{
2979	struct smc_sock *smc;
2980	int val, len;
2981
2982	smc = smc_sk(sock->sk);
2983
2984	if (get_user(len, optlen))
2985		return -EFAULT;
2986
2987	len = min_t(int, len, sizeof(int));
2988
2989	if (len < 0)
2990		return -EINVAL;
2991
2992	switch (optname) {
2993	case SMC_LIMIT_HS:
2994		val = smc->limit_smc_hs;
2995		break;
2996	default:
2997		return -EOPNOTSUPP;
2998	}
2999
3000	if (put_user(len, optlen))
3001		return -EFAULT;
3002	if (copy_to_user(optval, &val, len))
3003		return -EFAULT;
3004
3005	return 0;
3006}
3007
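/* handle setsockopt() for the SOL_SMC level, i.e. for options
 * implemented by smc itself
 */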
3008static int __smc_setsockopt(struct socket *sock, int level, int optname,
3009			    sockptr_t optval, unsigned int optlen)
3010{
3011	struct sock *sk = sock->sk;
3012	struct smc_sock *smc;
3013	int val, rc;
3014
3015	smc = smc_sk(sk);
3016
3017	lock_sock(sk);
3018	switch (optname) {
3019	case SMC_LIMIT_HS:
3020		if (optlen < sizeof(int)) {
3021			rc = -EINVAL;
3022			break;
3023		}
3024		if (copy_from_sockptr(&val, optval, sizeof(int))) {
3025			rc = -EFAULT;
3026			break;
3027		}
3028
3029		smc->limit_smc_hs = !!val;
3030		rc = 0;
3031		break;
3032	default:
3033		rc = -EOPNOTSUPP;
3034		break;
3035	}
3036	release_sock(sk);
3037
3038	return rc;
3039}
3040
3041static int smc_setsockopt(struct socket *sock, int level, int optname,
3042			  sockptr_t optval, unsigned int optlen)
3043{
3044	struct sock *sk = sock->sk;
3045	struct smc_sock *smc;
3046	int val, rc;
3047
3048	if (level == SOL_TCP && optname == TCP_ULP)
3049		return -EOPNOTSUPP;
3050	else if (level == SOL_SMC)
3051		return __smc_setsockopt(sock, level, optname, optval, optlen);
3052
3053	smc = smc_sk(sk);
3054
3055	/* generic setsockopts reaching us here always apply to the
3056	 * CLC socket
3057	 */
3058	mutex_lock(&smc->clcsock_release_lock);
3059	if (!smc->clcsock) {
3060		mutex_unlock(&smc->clcsock_release_lock);
3061		return -EBADF;
3062	}
3063	if (unlikely(!smc->clcsock->ops->setsockopt))
3064		rc = -EOPNOTSUPP;
3065	else
3066		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
3067						   optval, optlen);
3068	if (smc->clcsock->sk->sk_err) {
3069		sk->sk_err = smc->clcsock->sk->sk_err;
3070		sk_error_report(sk);
3071	}
3072	mutex_unlock(&smc->clcsock_release_lock);
3073
3074	if (optlen < sizeof(int))
3075		return -EINVAL;
3076	if (copy_from_sockptr(&val, optval, sizeof(int)))
3077		return -EFAULT;
3078
3079	lock_sock(sk);
3080	if (rc || smc->use_fallback)
3081		goto out;
3082	switch (optname) {
3083	case TCP_FASTOPEN:
3084	case TCP_FASTOPEN_CONNECT:
3085	case TCP_FASTOPEN_KEY:
3086	case TCP_FASTOPEN_NO_COOKIE:
3087		/* option not supported by SMC */
3088		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
3089			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
3090		} else {
3091			rc = -EINVAL;
3092		}
3093		break;
3094	case TCP_NODELAY:
3095		if (sk->sk_state != SMC_INIT &&
3096		    sk->sk_state != SMC_LISTEN &&
3097		    sk->sk_state != SMC_CLOSED) {
3098			if (val) {
3099				SMC_STAT_INC(smc, ndly_cnt);
3100				smc_tx_pending(&smc->conn);
3101				cancel_delayed_work(&smc->conn.tx_work);
3102			}
3103		}
3104		break;
3105	case TCP_CORK:
3106		if (sk->sk_state != SMC_INIT &&
3107		    sk->sk_state != SMC_LISTEN &&
3108		    sk->sk_state != SMC_CLOSED) {
3109			if (!val) {
3110				SMC_STAT_INC(smc, cork_cnt);
3111				smc_tx_pending(&smc->conn);
3112				cancel_delayed_work(&smc->conn.tx_work);
3113			}
3114		}
3115		break;
3116	case TCP_DEFER_ACCEPT:
3117		smc->sockopt_defer_accept = val;
3118		break;
3119	default:
3120		break;
3121	}
3122out:
3123	release_sock(sk);
3124
3125	return rc;
3126}
3127
3128static int smc_getsockopt(struct socket *sock, int level, int optname,
3129			  char __user *optval, int __user *optlen)
3130{
3131	struct smc_sock *smc;
3132	int rc;
3133
3134	if (level == SOL_SMC)
3135		return __smc_getsockopt(sock, level, optname, optval, optlen);
3136
3137	smc = smc_sk(sock->sk);
3138	mutex_lock(&smc->clcsock_release_lock);
3139	if (!smc->clcsock) {
3140		mutex_unlock(&smc->clcsock_release_lock);
3141		return -EBADF;
3142	}
3143	/* socket options apply to the CLC socket */
3144	if (unlikely(!smc->clcsock->ops->getsockopt)) {
3145		mutex_unlock(&smc->clcsock_release_lock);
3146		return -EOPNOTSUPP;
3147	}
3148	rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
3149					   optval, optlen);
3150	mutex_unlock(&smc->clcsock_release_lock);
3151	return rc;
3152}
3153
3154static int smc_ioctl(struct socket *sock, unsigned int cmd,
3155		     unsigned long arg)
3156{
3157	union smc_host_cursor cons, urg;
3158	struct smc_connection *conn;
3159	struct smc_sock *smc;
3160	int answ;
3161
3162	smc = smc_sk(sock->sk);
3163	conn = &smc->conn;
3164	lock_sock(&smc->sk);
3165	if (smc->use_fallback) {
3166		if (!smc->clcsock) {
3167			release_sock(&smc->sk);
3168			return -EBADF;
3169		}
3170		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
3171		release_sock(&smc->sk);
3172		return answ;
3173	}
3174	switch (cmd) {
3175	case SIOCINQ: /* same as FIONREAD */
3176		if (smc->sk.sk_state == SMC_LISTEN) {
3177			release_sock(&smc->sk);
3178			return -EINVAL;
3179		}
3180		if (smc->sk.sk_state == SMC_INIT ||
3181		    smc->sk.sk_state == SMC_CLOSED)
3182			answ = 0;
3183		else
3184			answ = atomic_read(&smc->conn.bytes_to_rcv);
3185		break;
3186	case SIOCOUTQ:
3187		/* output queue size (not sent + not acked) */
3188		if (smc->sk.sk_state == SMC_LISTEN) {
3189			release_sock(&smc->sk);
3190			return -EINVAL;
3191		}
3192		if (smc->sk.sk_state == SMC_INIT ||
3193		    smc->sk.sk_state == SMC_CLOSED)
3194			answ = 0;
3195		else
3196			answ = smc->conn.sndbuf_desc->len -
3197					atomic_read(&smc->conn.sndbuf_space);
3198		break;
3199	case SIOCOUTQNSD:
3200		/* output queue size (not sent only) */
3201		if (smc->sk.sk_state == SMC_LISTEN) {
3202			release_sock(&smc->sk);
3203			return -EINVAL;
3204		}
3205		if (smc->sk.sk_state == SMC_INIT ||
3206		    smc->sk.sk_state == SMC_CLOSED)
3207			answ = 0;
3208		else
3209			answ = smc_tx_prepared_sends(&smc->conn);
3210		break;
3211	case SIOCATMARK:
3212		if (smc->sk.sk_state == SMC_LISTEN) {
3213			release_sock(&smc->sk);
3214			return -EINVAL;
3215		}
3216		if (smc->sk.sk_state == SMC_INIT ||
3217		    smc->sk.sk_state == SMC_CLOSED) {
3218			answ = 0;
3219		} else {
3220			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
3221			smc_curs_copy(&urg, &conn->urg_curs, conn);
3222			answ = smc_curs_diff(conn->rmb_desc->len,
3223					     &cons, &urg) == 1;
3224		}
3225		break;
3226	default:
3227		release_sock(&smc->sk);
3228		return -ENOIOCTLCMD;
3229	}
3230	release_sock(&smc->sk);
3231
3232	return put_user(answ, (int __user *)arg);
3233}
3234
3235/* Map the affected portions of the rmbe into an spd, note the number of bytes
3236 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
3237 * updates until the respective page has been fully processed.
3238 * Note that subsequent recv() calls have to wait until all splice() processing
3239 * has completed.
3240 */
3241static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
3242			       struct pipe_inode_info *pipe, size_t len,
3243			       unsigned int flags)
3244{
3245	struct sock *sk = sock->sk;
3246	struct smc_sock *smc;
3247	int rc = -ENOTCONN;
3248
3249	smc = smc_sk(sk);
3250	lock_sock(sk);
3251	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
3252		/* socket was connected before, no more data to read */
3253		rc = 0;
3254		goto out;
3255	}
3256	if (sk->sk_state == SMC_INIT ||
3257	    sk->sk_state == SMC_LISTEN ||
3258	    sk->sk_state == SMC_CLOSED)
3259		goto out;
3260
3261	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
3262		rc = 0;
3263		goto out;
3264	}
3265
3266	if (smc->use_fallback) {
3267		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
3268						    pipe, len, flags);
3269	} else {
3270		if (*ppos) {
3271			rc = -ESPIPE;
3272			goto out;
3273		}
3274		if (flags & SPLICE_F_NONBLOCK)
3275			flags = MSG_DONTWAIT;
3276		else
3277			flags = 0;
3278		SMC_STAT_INC(smc, splice_cnt);
3279		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
3280	}
3281out:
3282	release_sock(sk);
3283
3284	return rc;
3285}
3286
3287/* must look like tcp */
3288static const struct proto_ops smc_sock_ops = {
3289	.family		= PF_SMC,
3290	.owner		= THIS_MODULE,
3291	.release	= smc_release,
3292	.bind		= smc_bind,
3293	.connect	= smc_connect,
3294	.socketpair	= sock_no_socketpair,
3295	.accept		= smc_accept,
3296	.getname	= smc_getname,
3297	.poll		= smc_poll,
3298	.ioctl		= smc_ioctl,
3299	.listen		= smc_listen,
3300	.shutdown	= smc_shutdown,
3301	.setsockopt	= smc_setsockopt,
3302	.getsockopt	= smc_getsockopt,
3303	.sendmsg	= smc_sendmsg,
3304	.recvmsg	= smc_recvmsg,
3305	.mmap		= sock_no_mmap,
3306	.splice_read	= smc_splice_read,
3307};
3308
3309static int __smc_create(struct net *net, struct socket *sock, int protocol,
3310			int kern, struct socket *clcsock)
3311{
3312	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
3313	struct smc_sock *smc;
3314	struct sock *sk;
3315	int rc;
3316
3317	rc = -ESOCKTNOSUPPORT;
3318	if (sock->type != SOCK_STREAM)
3319		goto out;
3320
3321	rc = -EPROTONOSUPPORT;
3322	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
3323		goto out;
3324
3325	rc = -ENOBUFS;
3326	sock->ops = &smc_sock_ops;
3327	sock->state = SS_UNCONNECTED;
3328	sk = smc_sock_alloc(net, sock, protocol);
3329	if (!sk)
3330		goto out;
3331
3332	/* create internal TCP socket for CLC handshake and fallback */
3333	smc = smc_sk(sk);
3334	smc->use_fallback = false; /* assume rdma capability first */
3335	smc->fallback_rsn = 0;
3336
3337	/* take the default for limit_smc_hs from the net namespace */
3338	smc->limit_smc_hs = net->smc.limit_smc_hs;
3339
3340	rc = 0;
3341	if (!clcsock) {
3342		rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
3343				      &smc->clcsock);
3344		if (rc) {
3345			sk_common_release(sk);
3346			goto out;
3347		}
3348
3349		/* smc_clcsock_release() does not wait for smc->clcsock->sk's
3350		 * destruction; its sk_state might not be TCP_CLOSE after
3351		 * smc->sk is close()d, and TCP timers can fire later,
3352		 * which needs a net ref.
3353		 */
3354		sk = smc->clcsock->sk;
3355		__netns_tracker_free(net, &sk->ns_tracker, false);
3356		sk->sk_net_refcnt = 1;
3357		get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
3358		sock_inuse_add(net, 1);
3359	} else {
3360		smc->clcsock = clcsock;
3361	}
3362
3363out:
3364	return rc;
3365}
3366
3367static int smc_create(struct net *net, struct socket *sock, int protocol,
3368		      int kern)
3369{
3370	return __smc_create(net, sock, protocol, kern, NULL);
3371}
3372
3373static const struct net_proto_family smc_sock_family_ops = {
3374	.family	= PF_SMC,
3375	.owner	= THIS_MODULE,
3376	.create	= smc_create,
3377};
3378
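/* tcp ULP "smc": convert an unconnected TCP socket into an smc socket,
 * keeping the TCP socket as internal clcsock and transferring the open
 * file to the new smc socket
 */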
3379static int smc_ulp_init(struct sock *sk)
3380{
3381	struct socket *tcp = sk->sk_socket;
3382	struct net *net = sock_net(sk);
3383	struct socket *smcsock;
3384	int protocol, ret;
3385
3386	/* only TCP can be replaced */
3387	if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
3388	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
3389		return -ESOCKTNOSUPPORT;
3390	/* don't handle wq now */
3391	if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
3392		return -ENOTCONN;
3393
3394	if (sk->sk_family == AF_INET)
3395		protocol = SMCPROTO_SMC;
3396	else
3397		protocol = SMCPROTO_SMC6;
3398
3399	smcsock = sock_alloc();
3400	if (!smcsock)
3401		return -ENFILE;
3402
3403	smcsock->type = SOCK_STREAM;
3404	__module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
3405	ret = __smc_create(net, smcsock, protocol, 1, tcp);
3406	if (ret) {
3407		sock_release(smcsock); /* does module_put(), since ops is not NULL */
3408		return ret;
3409	}
3410
3411	/* replace the tcp socket with the smc socket */
3412	smcsock->file = tcp->file;
3413	smcsock->file->private_data = smcsock;
3414	smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
3415	smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
3416	tcp->file = NULL;
3417
3418	return ret;
3419}
3420
3421static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
3422			  const gfp_t priority)
3423{
3424	struct inet_connection_sock *icsk = inet_csk(newsk);
3425
3426	/* the child of a listening socket must not inherit ulp ops */
3427	icsk->icsk_ulp_ops = NULL;
3428}
3429
3430static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
3431	.name		= "smc",
3432	.owner		= THIS_MODULE,
3433	.init		= smc_ulp_init,
3434	.clone		= smc_ulp_clone,
3435};
3436
3437unsigned int smc_net_id;
3438
3439static __net_init int smc_net_init(struct net *net)
3440{
3441	int rc;
3442
3443	rc = smc_sysctl_net_init(net);
3444	if (rc)
3445		return rc;
3446	return smc_pnet_net_init(net);
3447}
3448
3449static void __net_exit smc_net_exit(struct net *net)
3450{
3451	smc_sysctl_net_exit(net);
3452	smc_pnet_net_exit(net);
3453}
3454
3455static __net_init int smc_net_stat_init(struct net *net)
3456{
3457	return smc_stats_init(net);
3458}
3459
3460static void __net_exit smc_net_stat_exit(struct net *net)
3461{
3462	smc_stats_exit(net);
3463}
3464
3465static struct pernet_operations smc_net_ops = {
3466	.init = smc_net_init,
3467	.exit = smc_net_exit,
3468	.id   = &smc_net_id,
3469	.size = sizeof(struct smc_net),
3470};
3471
3472static struct pernet_operations smc_net_stat_ops = {
3473	.init = smc_net_stat_init,
3474	.exit = smc_net_stat_exit,
3475};
3476
3477static int __init smc_init(void)
3478{
3479	int rc;
3480
3481	rc = register_pernet_subsys(&smc_net_ops);
3482	if (rc)
3483		return rc;
3484
3485	rc = register_pernet_subsys(&smc_net_stat_ops);
3486	if (rc)
3487		goto out_pernet_subsys;
3488
3489	rc = smc_ism_init();
3490	if (rc)
3491		goto out_pernet_subsys_stat;
3492	smc_clc_init();
3493
3494	rc = smc_nl_init();
3495	if (rc)
3496		goto out_ism;
3497
3498	rc = smc_pnet_init();
3499	if (rc)
3500		goto out_nl;
3501
3502	rc = -ENOMEM;
3503
3504	smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
3505	if (!smc_tcp_ls_wq)
3506		goto out_pnet;
3507
3508	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
3509	if (!smc_hs_wq)
3510		goto out_alloc_tcp_ls_wq;
3511
3512	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
3513	if (!smc_close_wq)
3514		goto out_alloc_hs_wq;
3515
3516	rc = smc_core_init();
3517	if (rc) {
3518		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
3519		goto out_alloc_wqs;
3520	}
3521
3522	rc = smc_llc_init();
3523	if (rc) {
3524		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
3525		goto out_core;
3526	}
3527
3528	rc = smc_cdc_init();
3529	if (rc) {
3530		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
3531		goto out_core;
3532	}
3533
3534	rc = proto_register(&smc_proto, 1);
3535	if (rc) {
3536		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
3537		goto out_core;
3538	}
3539
3540	rc = proto_register(&smc_proto6, 1);
3541	if (rc) {
3542		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
3543		goto out_proto;
3544	}
3545
3546	rc = sock_register(&smc_sock_family_ops);
3547	if (rc) {
3548		pr_err("%s: sock_register fails with %d\n", __func__, rc);
3549		goto out_proto6;
3550	}
3551	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
3552	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);
3553
3554	rc = smc_ib_register_client();
3555	if (rc) {
3556		pr_err("%s: ib_register fails with %d\n", __func__, rc);
3557		goto out_sock;
3558	}
3559
3560	rc = tcp_register_ulp(&smc_ulp_ops);
3561	if (rc) {
3562		pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
3563		goto out_ib;
3564	}
3565
3566	static_branch_enable(&tcp_have_smc);
3567	return 0;
3568
3569out_ib:
3570	smc_ib_unregister_client();
3571out_sock:
3572	sock_unregister(PF_SMC);
3573out_proto6:
3574	proto_unregister(&smc_proto6);
3575out_proto:
3576	proto_unregister(&smc_proto);
3577out_core:
3578	smc_core_exit();
3579out_alloc_wqs:
3580	destroy_workqueue(smc_close_wq);
3581out_alloc_hs_wq:
3582	destroy_workqueue(smc_hs_wq);
3583out_alloc_tcp_ls_wq:
3584	destroy_workqueue(smc_tcp_ls_wq);
3585out_pnet:
3586	smc_pnet_exit();
3587out_nl:
3588	smc_nl_exit();
3589out_ism:
3590	smc_clc_exit();
3591	smc_ism_exit();
3592out_pernet_subsys_stat:
3593	unregister_pernet_subsys(&smc_net_stat_ops);
3594out_pernet_subsys:
3595	unregister_pernet_subsys(&smc_net_ops);
3596
3597	return rc;
3598}
3599
3600static void __exit smc_exit(void)
3601{
3602	static_branch_disable(&tcp_have_smc);
3603	tcp_unregister_ulp(&smc_ulp_ops);
3604	sock_unregister(PF_SMC);
3605	smc_core_exit();
3606	smc_ib_unregister_client();
3607	smc_ism_exit();
3608	destroy_workqueue(smc_close_wq);
3609	destroy_workqueue(smc_tcp_ls_wq);
3610	destroy_workqueue(smc_hs_wq);
3611	proto_unregister(&smc_proto6);
3612	proto_unregister(&smc_proto);
3613	smc_pnet_exit();
3614	smc_nl_exit();
3615	smc_clc_exit();
3616	unregister_pernet_subsys(&smc_net_stat_ops);
3617	unregister_pernet_subsys(&smc_net_ops);
3618	rcu_barrier();
3619}
3620
3621module_init(smc_init);
3622module_exit(smc_exit);
3623
3624MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
3625MODULE_DESCRIPTION("smc socket address family");
3626MODULE_LICENSE("GPL");
3627MODULE_ALIAS_NETPROTO(PF_SMC);
3628MODULE_ALIAS_TCP_ULP("smc");
 
 
 
 
 
3629MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
   4 *
   5 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
   6 *  applies to SOCK_STREAM sockets only
   7 *  offers an alternative communication option for TCP-protocol sockets
   8 *  applicable with RoCE-cards only
   9 *
  10 *  Initial restrictions:
  11 *    - support for alternate links postponed
  12 *
  13 *  Copyright IBM Corp. 2016, 2018
  14 *
  15 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
  16 *              based on prototype from Frank Blaschka
  17 */
  18
  19#define KMSG_COMPONENT "smc"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/module.h>
  23#include <linux/socket.h>
  24#include <linux/workqueue.h>
  25#include <linux/in.h>
  26#include <linux/sched/signal.h>
  27#include <linux/if_vlan.h>
  28#include <linux/rcupdate_wait.h>
  29#include <linux/ctype.h>
  30#include <linux/splice.h>
  31
  32#include <net/sock.h>
  33#include <net/tcp.h>
  34#include <net/smc.h>
  35#include <asm/ioctls.h>
  36
  37#include <net/net_namespace.h>
  38#include <net/netns/generic.h>
  39#include "smc_netns.h"
  40
  41#include "smc.h"
  42#include "smc_clc.h"
  43#include "smc_llc.h"
  44#include "smc_cdc.h"
  45#include "smc_core.h"
  46#include "smc_ib.h"
  47#include "smc_ism.h"
  48#include "smc_pnet.h"
  49#include "smc_netlink.h"
  50#include "smc_tx.h"
  51#include "smc_rx.h"
  52#include "smc_close.h"
  53#include "smc_stats.h"
  54#include "smc_tracepoint.h"
  55#include "smc_sysctl.h"
  56#include "smc_loopback.h"
  57#include "smc_inet.h"
  58
  59static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
  60						 * creation on server
  61						 */
  62static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
  63						 * creation on client
  64						 */
  65
  66static struct workqueue_struct	*smc_tcp_ls_wq;	/* wq for tcp listen work */
  67struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
  68struct workqueue_struct	*smc_close_wq;	/* wq for close work */
  69
  70static void smc_tcp_listen_work(struct work_struct *);
  71static void smc_connect_work(struct work_struct *);
  72
  73int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
  74{
  75	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
  76	void *hdr;
  77
  78	if (cb_ctx->pos[0])
  79		goto out;
  80
  81	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  82			  &smc_gen_nl_family, NLM_F_MULTI,
  83			  SMC_NETLINK_DUMP_HS_LIMITATION);
  84	if (!hdr)
  85		return -ENOMEM;
  86
  87	if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED,
  88		       sock_net(skb->sk)->smc.limit_smc_hs))
  89		goto err;
  90
  91	genlmsg_end(skb, hdr);
  92	cb_ctx->pos[0] = 1;
  93out:
  94	return skb->len;
  95err:
  96	genlmsg_cancel(skb, hdr);
  97	return -EMSGSIZE;
  98}
  99
 100int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
 101{
 102	sock_net(skb->sk)->smc.limit_smc_hs = true;
 103	return 0;
 104}
 105
 106int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
 107{
 108	sock_net(skb->sk)->smc.limit_smc_hs = false;
 109	return 0;
 110}
 111
 112static void smc_set_keepalive(struct sock *sk, int val)
 113{
 114	struct smc_sock *smc = smc_sk(sk);
 115
 116	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
 117}
 118
 119static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
 120					  struct sk_buff *skb,
 121					  struct request_sock *req,
 122					  struct dst_entry *dst,
 123					  struct request_sock *req_unhash,
 124					  bool *own_req)
 125{
 126	struct smc_sock *smc;
 127	struct sock *child;
 128
 129	smc = smc_clcsock_user_data(sk);
 130
 131	if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
 132				sk->sk_max_ack_backlog)
 133		goto drop;
 134
 135	if (sk_acceptq_is_full(&smc->sk)) {
 136		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 137		goto drop;
 138	}
 139
 140	/* passthrough to original syn recv sock fct */
 141	child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
 142					       own_req);
 143	/* child must not inherit smc or its ops */
 144	if (child) {
 145		rcu_assign_sk_user_data(child, NULL);
 146
 147		/* v4-mapped sockets don't inherit parent ops. Don't restore. */
 148		if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
 149			inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
 150	}
 151	return child;
 152
 153drop:
 154	dst_release(dst);
 155	tcp_listendrop(sk);
 156	return NULL;
 157}
 158
 159static bool smc_hs_congested(const struct sock *sk)
 160{
 161	const struct smc_sock *smc;
 162
 163	smc = smc_clcsock_user_data(sk);
 164
 165	if (!smc)
 166		return true;
 167
 168	if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
 169		return true;
 170
 171	return false;
 172}
 173
 174struct smc_hashinfo smc_v4_hashinfo = {
 175	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
 176};
 177
 178struct smc_hashinfo smc_v6_hashinfo = {
 179	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
 180};
 181
 182int smc_hash_sk(struct sock *sk)
 183{
 184	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
 185	struct hlist_head *head;
 186
 187	head = &h->ht;
 188
 189	write_lock_bh(&h->lock);
 190	sk_add_node(sk, head);
 191	write_unlock_bh(&h->lock);
 192	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 193
 194	return 0;
 195}
 196
 197void smc_unhash_sk(struct sock *sk)
 198{
 199	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
 200
 201	write_lock_bh(&h->lock);
 202	if (sk_del_node_init(sk))
 203		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 204	write_unlock_bh(&h->lock);
 205}
 206
 207/* This will be called before the user actually releases the sock lock.
 208 * So do here the work that we could not do in BH context because the
 209 * user held the sock lock.
 210 */
 211void smc_release_cb(struct sock *sk)
 212{
 213	struct smc_sock *smc = smc_sk(sk);
 214
 215	if (smc->conn.tx_in_release_sock) {
 216		smc_tx_pending(&smc->conn);
 217		smc->conn.tx_in_release_sock = false;
 218	}
 219}
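/* Illustrative sketch: ->release_cb is invoked by release_sock() in
 * net/core/sock.c, roughly:
 *
 *	if (sk->sk_prot->release_cb)
 *		sk->sk_prot->release_cb(sk);
 *
 * so smc_release_cb() above runs whenever the lock owner drops the sock
 * lock, flushing tx work that was deferred while the lock was held.
 */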
 220
 221struct proto smc_proto = {
 222	.name		= "SMC",
 223	.owner		= THIS_MODULE,
 224	.keepalive	= smc_set_keepalive,
 225	.hash		= smc_hash_sk,
 226	.unhash		= smc_unhash_sk,
 227	.release_cb	= smc_release_cb,
 228	.obj_size	= sizeof(struct smc_sock),
 229	.h.smc_hash	= &smc_v4_hashinfo,
 230	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
 231};
 232EXPORT_SYMBOL_GPL(smc_proto);
 233
 234struct proto smc_proto6 = {
 235	.name		= "SMC6",
 236	.owner		= THIS_MODULE,
 237	.keepalive	= smc_set_keepalive,
 238	.hash		= smc_hash_sk,
 239	.unhash		= smc_unhash_sk,
 240	.release_cb	= smc_release_cb,
 241	.obj_size	= sizeof(struct smc_sock),
 242	.h.smc_hash	= &smc_v6_hashinfo,
 243	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
 244};
 245EXPORT_SYMBOL_GPL(smc_proto6);
 246
 247static void smc_fback_restore_callbacks(struct smc_sock *smc)
 248{
 249	struct sock *clcsk = smc->clcsock->sk;
 250
 251	write_lock_bh(&clcsk->sk_callback_lock);
 252	clcsk->sk_user_data = NULL;
 253
 254	smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
 255	smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
 256	smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
 257	smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
 258
 259	write_unlock_bh(&clcsk->sk_callback_lock);
 260}
 261
 262static void smc_restore_fallback_changes(struct smc_sock *smc)
 263{
 264	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
 265		smc->clcsock->file->private_data = smc->sk.sk_socket;
 266		smc->clcsock->file = NULL;
 267		smc_fback_restore_callbacks(smc);
 268	}
 269}
 270
 271static int __smc_release(struct smc_sock *smc)
 272{
 273	struct sock *sk = &smc->sk;
 274	int rc = 0;
 275
 276	if (!smc->use_fallback) {
 277		rc = smc_close_active(smc);
 278		smc_sock_set_flag(sk, SOCK_DEAD);
 279		sk->sk_shutdown |= SHUTDOWN_MASK;
 280	} else {
 281		if (sk->sk_state != SMC_CLOSED) {
 282			if (sk->sk_state != SMC_LISTEN &&
 283			    sk->sk_state != SMC_INIT)
 284				sock_put(sk); /* passive closing */
 285			if (sk->sk_state == SMC_LISTEN) {
 286				/* wake up clcsock accept */
 287				rc = kernel_sock_shutdown(smc->clcsock,
 288							  SHUT_RDWR);
 289			}
 290			sk->sk_state = SMC_CLOSED;
 291			sk->sk_state_change(sk);
 292		}
 293		smc_restore_fallback_changes(smc);
 294	}
 295
 296	sk->sk_prot->unhash(sk);
 297
 298	if (sk->sk_state == SMC_CLOSED) {
 299		if (smc->clcsock) {
 300			release_sock(sk);
 301			smc_clcsock_release(smc);
 302			lock_sock(sk);
 303		}
 304		if (!smc->use_fallback)
 305			smc_conn_free(&smc->conn);
 306	}
 307
 308	return rc;
 309}
 310
 311int smc_release(struct socket *sock)
 312{
 313	struct sock *sk = sock->sk;
 314	struct smc_sock *smc;
 315	int old_state, rc = 0;
 316
 317	if (!sk)
 318		goto out;
 319
 320	sock_hold(sk); /* sock_put below */
 321	smc = smc_sk(sk);
 322
 323	old_state = sk->sk_state;
 324
 325	/* cleanup for a dangling non-blocking connect */
 326	if (smc->connect_nonblock && old_state == SMC_INIT)
 327		tcp_abort(smc->clcsock->sk, ECONNABORTED);
 328
 329	if (cancel_work_sync(&smc->connect_work))
 330		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
 331
 332	if (sk->sk_state == SMC_LISTEN)
 333		/* smc_close_non_accepted() is called and acquires
 334		 * sock lock for child sockets again
 335		 */
 336		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 337	else
 338		lock_sock(sk);
 339
 340	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
 341	    !smc->use_fallback)
 342		smc_close_active_abort(smc);
 343
 344	rc = __smc_release(smc);
 345
 346	/* detach socket */
 347	sock_orphan(sk);
 348	sock->sk = NULL;
 349	release_sock(sk);
 350
 351	sock_put(sk); /* sock_hold above */
 352	sock_put(sk); /* final sock_put */
 353out:
 354	return rc;
 355}
 356
 357static void smc_destruct(struct sock *sk)
 358{
 359	if (sk->sk_state != SMC_CLOSED)
 360		return;
 361	if (!sock_flag(sk, SOCK_DEAD))
 362		return;
 363}
 364
 365void smc_sk_init(struct net *net, struct sock *sk, int protocol)
 366{
 367	struct smc_sock *smc = smc_sk(sk);
 368
 369	sk->sk_state = SMC_INIT;
 370	sk->sk_destruct = smc_destruct;
 371	sk->sk_protocol = protocol;
 372	WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
 373	WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
 374	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 375	INIT_WORK(&smc->connect_work, smc_connect_work);
 376	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
 377	INIT_LIST_HEAD(&smc->accept_q);
 378	spin_lock_init(&smc->accept_q_lock);
 379	spin_lock_init(&smc->conn.send_lock);
 380	sk->sk_prot->hash(sk);
 381	mutex_init(&smc->clcsock_release_lock);
 382	smc_init_saved_callbacks(smc);
 383	smc->limit_smc_hs = net->smc.limit_smc_hs;
 384	smc->use_fallback = false; /* assume rdma capability first */
 385	smc->fallback_rsn = 0;
 386	smc_close_init(smc);
 387}
 388
 389static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
 390				   int protocol)
 391{
 392	struct proto *prot;
 393	struct sock *sk;
 394
 395	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
 396	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
 397	if (!sk)
 398		return NULL;
 399
 400	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
 401	smc_sk_init(net, sk, protocol);
 402
 403	return sk;
 404}
 405
 406int smc_bind(struct socket *sock, struct sockaddr *uaddr,
 407	     int addr_len)
 408{
 409	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 410	struct sock *sk = sock->sk;
 411	struct smc_sock *smc;
 412	int rc;
 413
 414	smc = smc_sk(sk);
 415
 416	/* replicate tests from inet_bind(), to be safe wrt. future changes */
 417	rc = -EINVAL;
 418	if (addr_len < sizeof(struct sockaddr_in))
 419		goto out;
 420
 421	rc = -EAFNOSUPPORT;
 422	if (addr->sin_family != AF_INET &&
 423	    addr->sin_family != AF_INET6 &&
 424	    addr->sin_family != AF_UNSPEC)
 425		goto out;
 426	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
 427	if (addr->sin_family == AF_UNSPEC &&
 428	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
 429		goto out;
 430
 431	lock_sock(sk);
 432
 433	/* Check if socket is already active */
 434	rc = -EINVAL;
 435	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
 436		goto out_rel;
 437
 438	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
 439	smc->clcsock->sk->sk_reuseport = sk->sk_reuseport;
 440	rc = kernel_bind(smc->clcsock, uaddr, addr_len);
 441
 442out_rel:
 443	release_sock(sk);
 444out:
 445	return rc;
 446}
 447
 448/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 449 * clc socket (since smc is not called for these options from net/core)
 450 */
 451
 452#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
 453			     (1UL << SOCK_KEEPOPEN) | \
 454			     (1UL << SOCK_LINGER) | \
 455			     (1UL << SOCK_BROADCAST) | \
 456			     (1UL << SOCK_TIMESTAMP) | \
 457			     (1UL << SOCK_DBG) | \
 458			     (1UL << SOCK_RCVTSTAMP) | \
 459			     (1UL << SOCK_RCVTSTAMPNS) | \
 460			     (1UL << SOCK_LOCALROUTE) | \
 461			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
 462			     (1UL << SOCK_RXQ_OVFL) | \
 463			     (1UL << SOCK_WIFI_STATUS) | \
 464			     (1UL << SOCK_NOFCS) | \
 465			     (1UL << SOCK_FILTER_LOCKED) | \
 466			     (1UL << SOCK_TSTAMP_NEW))
 467
 468/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */
 469static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
 470				     unsigned long mask)
 471{
 472	nsk->sk_userlocks = osk->sk_userlocks;
 473	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
 474		nsk->sk_sndbuf = osk->sk_sndbuf;
 475	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
 476		nsk->sk_rcvbuf = osk->sk_rcvbuf;
 477}
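/* Illustrative example: a user-space setsockopt(fd, SOL_SOCKET, SO_SNDBUF,
 * &val, sizeof(val)) makes the core set SOCK_SNDBUF_LOCK in sk_userlocks,
 * so the explicitly chosen value is carried over above; without it, the
 * 2 * sysctl_{w,r}mem defaults picked in smc_sk_init() stay in effect.
 */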
 478
 479static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
 480				   unsigned long mask)
 481{
 482	/* options we don't get control over via setsockopt */
 483	nsk->sk_type = osk->sk_type;
 484	nsk->sk_sndtimeo = osk->sk_sndtimeo;
 485	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
 486	nsk->sk_mark = READ_ONCE(osk->sk_mark);
 487	nsk->sk_priority = READ_ONCE(osk->sk_priority);
 488	nsk->sk_rcvlowat = osk->sk_rcvlowat;
 489	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
 490	nsk->sk_err = osk->sk_err;
 491
 492	nsk->sk_flags &= ~mask;
 493	nsk->sk_flags |= osk->sk_flags & mask;
 494
 495	smc_adjust_sock_bufsizes(nsk, osk, mask);
 496}
 497
 498static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
 499{
 500	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
 501}
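/* Illustrative example: a user-space setsockopt(fd, SOL_SOCKET,
 * SO_KEEPALIVE, &one, sizeof(one)) sets SOCK_KEEPOPEN on the SMC sock;
 * since SOCK_KEEPOPEN is part of SK_FLAGS_SMC_TO_CLC, the call above
 * mirrors it to the internal CLC (TCP) socket, so keepalive probes run
 * on the underlying TCP connection.
 */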
 502
 503#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
 504			     (1UL << SOCK_KEEPOPEN) | \
 505			     (1UL << SOCK_LINGER) | \
 506			     (1UL << SOCK_DBG))
 507/* copy only settings and flags relevant for smc from clc to smc socket */
 508static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
 509{
 510	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
 511}
 512
 513/* register the new vzalloced sndbuf on all links */
 514static int smcr_lgr_reg_sndbufs(struct smc_link *link,
 515				struct smc_buf_desc *snd_desc)
 516{
 517	struct smc_link_group *lgr = link->lgr;
 518	int i, rc = 0;
 519
 520	if (!snd_desc->is_vm)
 521		return -EINVAL;
 522
 523	/* protect against parallel smcr_link_reg_buf() */
 524	down_write(&lgr->llc_conf_mutex);
 525	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 526		if (!smc_link_active(&lgr->lnk[i]))
 527			continue;
 528		rc = smcr_link_reg_buf(&lgr->lnk[i], snd_desc);
 529		if (rc)
 530			break;
 531	}
 532	up_write(&lgr->llc_conf_mutex);
 533	return rc;
 534}
 535
 536/* register the new rmb on all links */
 537static int smcr_lgr_reg_rmbs(struct smc_link *link,
 538			     struct smc_buf_desc *rmb_desc)
 539{
 540	struct smc_link_group *lgr = link->lgr;
 541	bool do_slow = false;
 542	int i, rc = 0;
 543
 544	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
 545	if (rc)
 546		return rc;
 547
 548	down_read(&lgr->llc_conf_mutex);
 549	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 550		if (!smc_link_active(&lgr->lnk[i]))
 551			continue;
 552		if (!rmb_desc->is_reg_mr[link->link_idx]) {
 553			up_read(&lgr->llc_conf_mutex);
 554			goto slow_path;
 555		}
 556	}
 557	/* mr already registered */
 558	goto fast_path;
 559slow_path:
 560	do_slow = true;
 561	/* protect against parallel smc_llc_cli_rkey_exchange() and
 562	 * parallel smcr_link_reg_buf()
 563	 */
 564	down_write(&lgr->llc_conf_mutex);
 565	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 566		if (!smc_link_active(&lgr->lnk[i]))
 567			continue;
 568		rc = smcr_link_reg_buf(&lgr->lnk[i], rmb_desc);
 569		if (rc)
 570			goto out;
 571	}
 572fast_path:
 573	/* exchange confirm_rkey msg with peer */
 574	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
 575	if (rc) {
 576		rc = -EFAULT;
 577		goto out;
 578	}
 579	rmb_desc->is_conf_rkey = true;
 580out:
 581	do_slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex);
 582	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
 583	return rc;
 584}
 585
 586static int smcr_clnt_conf_first_link(struct smc_sock *smc)
 587{
 588	struct smc_link *link = smc->conn.lnk;
 589	struct smc_llc_qentry *qentry;
 590	int rc;
 591
 592	/* Receive CONFIRM LINK request from server over RoCE fabric.
 593	 * Using twice the server's default timeout on the client side
 594	 * reduces the chance of DECLINE messages from both sides
 595	 * crossing or colliding.
 596	 */
 597	qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
 598			      SMC_LLC_CONFIRM_LINK);
 599	if (!qentry) {
 600		struct smc_clc_msg_decline dclc;
 601
 602		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
 603				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
 604		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
 605	}
 606	smc_llc_save_peer_uid(qentry);
 607	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
 608	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
 609	if (rc)
 610		return SMC_CLC_DECL_RMBE_EC;
 611
 612	rc = smc_ib_modify_qp_rts(link);
 613	if (rc)
 614		return SMC_CLC_DECL_ERR_RDYLNK;
 615
 616	smc_wr_remember_qp_attr(link);
 617
 618	/* reg the sndbuf if it was vzalloced */
 619	if (smc->conn.sndbuf_desc->is_vm) {
 620		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
 621			return SMC_CLC_DECL_ERR_REGBUF;
 622	}
 623
 624	/* reg the rmb */
 625	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
 626		return SMC_CLC_DECL_ERR_REGBUF;
 627
 628	/* confirm_rkey is implicit on 1st contact */
 629	smc->conn.rmb_desc->is_conf_rkey = true;
 630
 631	/* send CONFIRM LINK response over RoCE fabric */
 632	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
 633	if (rc < 0)
 634		return SMC_CLC_DECL_TIMEOUT_CL;
 635
 636	smc_llc_link_active(link);
 637	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
 638
 639	if (link->lgr->max_links > 1) {
 640		/* optional 2nd link, receive ADD LINK request from server */
 641		qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
 642				      SMC_LLC_ADD_LINK);
 643		if (!qentry) {
 644			struct smc_clc_msg_decline dclc;
 645
 646			rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
 647					      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
 648			if (rc == -EAGAIN)
 649				rc = 0; /* no DECLINE received, go with one link */
 650			return rc;
 651		}
 652		smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
 653		smc_llc_cli_add_link(link, qentry);
 654	}
 655	return 0;
 656}
 657
 658static bool smc_isascii(char *hostname)
 659{
 660	int i;
 661
 662	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
 663		if (!isascii(hostname[i]))
 664			return false;
 665	return true;
 666}
 667
 668static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
 669					struct smc_clc_msg_accept_confirm *clc)
 670{
 671	struct smc_clc_first_contact_ext *fce;
 672	int clc_v2_len;
 673
 674	if (clc->hdr.version == SMC_V1 ||
 675	    !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
 676		return;
 677
 678	if (smc->conn.lgr->is_smcd) {
 679		memcpy(smc->conn.lgr->negotiated_eid, clc->d1.eid,
 680		       SMC_MAX_EID_LEN);
 681		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, d1);
 682	} else {
 683		memcpy(smc->conn.lgr->negotiated_eid, clc->r1.eid,
 684		       SMC_MAX_EID_LEN);
 685		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, r1);
 686	}
 687	fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc) + clc_v2_len);
 688	smc->conn.lgr->peer_os = fce->os_type;
 689	smc->conn.lgr->peer_smc_release = fce->release;
 690	if (smc_isascii(fce->hostname))
 691		memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
 692		       SMC_MAX_HOSTNAME_LEN);
 693}
 694
 695static void smcr_conn_save_peer_info(struct smc_sock *smc,
 696				     struct smc_clc_msg_accept_confirm *clc)
 697{
 698	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);
 699
 700	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
 701	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
 702	smc->conn.peer_rmbe_size = bufsize;
 703	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
 704	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
 705}
 706
 707static void smcd_conn_save_peer_info(struct smc_sock *smc,
 708				     struct smc_clc_msg_accept_confirm *clc)
 709{
 710	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
 711
 712	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
 713	smc->conn.peer_token = ntohll(clc->d0.token);
 714	/* msg header takes up space in the buffer */
 715	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
 716	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
 717	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
 718}
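/* Worked example (illustrative): with an uncompressed peer buffer size of
 * 64 KiB, a connection with peer_rmbe_idx == 2 writes at
 * tx_off = 64K * (2 - 1) = 64K for SMC-R above, but at
 * tx_off = 64K * 2 = 128K for SMC-D, which in addition subtracts
 * sizeof(struct smcd_cdc_msg) from the usable peer_rmbe_size.
 */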
 719
 720static void smc_conn_save_peer_info(struct smc_sock *smc,
 721				    struct smc_clc_msg_accept_confirm *clc)
 722{
 723	if (smc->conn.lgr->is_smcd)
 724		smcd_conn_save_peer_info(smc, clc);
 725	else
 726		smcr_conn_save_peer_info(smc, clc);
 727	smc_conn_save_peer_info_fce(smc, clc);
 728}
 729
 730static void smc_link_save_peer_info(struct smc_link *link,
 731				    struct smc_clc_msg_accept_confirm *clc,
 732				    struct smc_init_info *ini)
 733{
 734	link->peer_qpn = ntoh24(clc->r0.qpn);
 735	memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
 736	memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
 737	link->peer_psn = ntoh24(clc->r0.psn);
 738	link->peer_mtu = clc->r0.qp_mtu;
 739}
 740
 741static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
 742				       struct smc_stats_fback *fback_arr)
 743{
 744	int cnt;
 745
 746	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
 747		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
 748			fback_arr[cnt].count++;
 749			break;
 750		}
 751		if (!fback_arr[cnt].fback_code) {
 752			fback_arr[cnt].fback_code = smc->fallback_rsn;
 753			fback_arr[cnt].count++;
 754			break;
 755		}
 756	}
 757}
 758
 759static void smc_stat_fallback(struct smc_sock *smc)
 760{
 761	struct net *net = sock_net(&smc->sk);
 762
 763	mutex_lock(&net->smc.mutex_fback_rsn);
 764	if (smc->listen_smc) {
 765		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
 766		net->smc.fback_rsn->srv_fback_cnt++;
 767	} else {
 768		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
 769		net->smc.fback_rsn->clnt_fback_cnt++;
 770	}
 771	mutex_unlock(&net->smc.mutex_fback_rsn);
 772}
 773
 774/* must be called under rcu read lock */
 775static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
 776{
 777	struct socket_wq *wq;
 778	__poll_t flags;
 779
 780	wq = rcu_dereference(smc->sk.sk_wq);
 781	if (!skwq_has_sleeper(wq))
 782		return;
 783
 784	/* wake up smc sk->sk_wq */
 785	if (!key) {
 786		/* sk_state_change */
 787		wake_up_interruptible_all(&wq->wait);
 788	} else {
 789		flags = key_to_poll(key);
 790		if (flags & (EPOLLIN | EPOLLOUT))
 791			/* sk_data_ready or sk_write_space */
 792			wake_up_interruptible_sync_poll(&wq->wait, flags);
 793		else if (flags & EPOLLERR)
 794			/* sk_error_report */
 795			wake_up_interruptible_poll(&wq->wait, flags);
 796	}
 797}
 798
 799static int smc_fback_mark_woken(wait_queue_entry_t *wait,
 800				unsigned int mode, int sync, void *key)
 801{
 802	struct smc_mark_woken *mark =
 803		container_of(wait, struct smc_mark_woken, wait_entry);
 804
 805	mark->woken = true;
 806	mark->key = key;
 807	return 0;
 808}
 809
 810static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
 811				     void (*clcsock_callback)(struct sock *sk))
 812{
 813	struct smc_mark_woken mark = { .woken = false };
 814	struct socket_wq *wq;
 815
 816	init_waitqueue_func_entry(&mark.wait_entry,
 817				  smc_fback_mark_woken);
 818	rcu_read_lock();
 819	wq = rcu_dereference(clcsk->sk_wq);
 820	if (!wq)
 821		goto out;
 822	add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
 823	clcsock_callback(clcsk);
 824	remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
 825
 826	if (mark.woken)
 827		smc_fback_wakeup_waitqueue(smc, mark.key);
 828out:
 829	rcu_read_unlock();
 830}
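/* The four smc_fback_*() callbacks below all follow the same pattern
 * (sketch): look up the parent smc sock from sk_user_data while holding
 * sk_callback_lock, then forward the clcsock wakeup to sleepers on the
 * smc socket:
 *
 *	smc = smc_clcsock_user_data(clcsk);
 *	if (smc)
 *		smc_fback_forward_wakeup(smc, clcsk, saved_callback);
 */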
 831
 832static void smc_fback_state_change(struct sock *clcsk)
 833{
 834	struct smc_sock *smc;
 835
 836	read_lock_bh(&clcsk->sk_callback_lock);
 837	smc = smc_clcsock_user_data(clcsk);
 838	if (smc)
 839		smc_fback_forward_wakeup(smc, clcsk,
 840					 smc->clcsk_state_change);
 841	read_unlock_bh(&clcsk->sk_callback_lock);
 842}
 843
 844static void smc_fback_data_ready(struct sock *clcsk)
 845{
 846	struct smc_sock *smc;
 847
 848	read_lock_bh(&clcsk->sk_callback_lock);
 849	smc = smc_clcsock_user_data(clcsk);
 850	if (smc)
 851		smc_fback_forward_wakeup(smc, clcsk,
 852					 smc->clcsk_data_ready);
 853	read_unlock_bh(&clcsk->sk_callback_lock);
 854}
 855
 856static void smc_fback_write_space(struct sock *clcsk)
 857{
 858	struct smc_sock *smc;
 859
 860	read_lock_bh(&clcsk->sk_callback_lock);
 861	smc = smc_clcsock_user_data(clcsk);
 862	if (smc)
 863		smc_fback_forward_wakeup(smc, clcsk,
 864					 smc->clcsk_write_space);
 865	read_unlock_bh(&clcsk->sk_callback_lock);
 866}
 867
 868static void smc_fback_error_report(struct sock *clcsk)
 869{
 870	struct smc_sock *smc;
 871
 872	read_lock_bh(&clcsk->sk_callback_lock);
 873	smc = smc_clcsock_user_data(clcsk);
 874	if (smc)
 875		smc_fback_forward_wakeup(smc, clcsk,
 876					 smc->clcsk_error_report);
 877	read_unlock_bh(&clcsk->sk_callback_lock);
 878}
 879
 880static void smc_fback_replace_callbacks(struct smc_sock *smc)
 881{
 882	struct sock *clcsk = smc->clcsock->sk;
 883
 884	write_lock_bh(&clcsk->sk_callback_lock);
 885	clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
 886
 887	smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
 888			       &smc->clcsk_state_change);
 889	smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
 890			       &smc->clcsk_data_ready);
 891	smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
 892			       &smc->clcsk_write_space);
 893	smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
 894			       &smc->clcsk_error_report);
 895
 896	write_unlock_bh(&clcsk->sk_callback_lock);
 897}
 898
 899static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 900{
 901	int rc = 0;
 902
 903	mutex_lock(&smc->clcsock_release_lock);
 904	if (!smc->clcsock) {
 905		rc = -EBADF;
 906		goto out;
 907	}
 908
 909	smc->use_fallback = true;
 910	smc->fallback_rsn = reason_code;
 911	smc_stat_fallback(smc);
 912	trace_smc_switch_to_fallback(smc, reason_code);
 913	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
 914		smc->clcsock->file = smc->sk.sk_socket->file;
 915		smc->clcsock->file->private_data = smc->clcsock;
 916		smc->clcsock->wq.fasync_list =
 917			smc->sk.sk_socket->wq.fasync_list;
 918		smc->sk.sk_socket->wq.fasync_list = NULL;
 919
 920		/* There might be some wait entries remaining in the
 921		 * smc sk->sk_wq; they need to be woken up whenever
 922		 * the clcsock's wait queue is woken up.
 923		 */
 924		smc_fback_replace_callbacks(smc);
 925	}
 926out:
 927	mutex_unlock(&smc->clcsock_release_lock);
 928	return rc;
 929}
 930
 931/* fall back during connect */
 932static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 933{
 934	struct net *net = sock_net(&smc->sk);
 935	int rc = 0;
 936
 937	rc = smc_switch_to_fallback(smc, reason_code);
 938	if (rc) { /* fallback fails */
 939		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 940		if (smc->sk.sk_state == SMC_INIT)
 941			sock_put(&smc->sk); /* passive closing */
 942		return rc;
 943	}
 944	smc_copy_sock_settings_to_clc(smc);
 945	smc->connect_nonblock = 0;
 946	if (smc->sk.sk_state == SMC_INIT)
 947		smc->sk.sk_state = SMC_ACTIVE;
 948	return 0;
 949}
 950
 951/* decline and fall back during connect */
 952static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
 953					u8 version)
 954{
 955	struct net *net = sock_net(&smc->sk);
 956	int rc;
 957
 958	if (reason_code < 0) { /* error, fallback is not possible */
 959		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 960		if (smc->sk.sk_state == SMC_INIT)
 961			sock_put(&smc->sk); /* passive closing */
 962		return reason_code;
 963	}
 964	if (reason_code != SMC_CLC_DECL_PEERDECL) {
 965		rc = smc_clc_send_decline(smc, reason_code, version);
 966		if (rc < 0) {
 967			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 968			if (smc->sk.sk_state == SMC_INIT)
 969				sock_put(&smc->sk); /* passive closing */
 970			return rc;
 971		}
 972	}
 973	return smc_connect_fallback(smc, reason_code);
 974}
 975
 976static void smc_conn_abort(struct smc_sock *smc, int local_first)
 977{
 978	struct smc_connection *conn = &smc->conn;
 979	struct smc_link_group *lgr = conn->lgr;
 980	bool lgr_valid = false;
 981
 982	if (smc_conn_lgr_valid(conn))
 983		lgr_valid = true;
 984
 985	smc_conn_free(conn);
 986	if (local_first && lgr_valid)
 987		smc_lgr_cleanup_early(lgr);
 988}
 989
 990/* check if there is an RDMA device available for this connection. */
 991/* called for connect and listen */
 992static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
 993{
 994	/* PNET table look up: search active ib_device and port
 995	 * within same PNETID that also contains the ethernet device
 996	 * used for the internal TCP socket
 997	 */
 998	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
 999	if (!ini->check_smcrv2 && !ini->ib_dev)
1000		return SMC_CLC_DECL_NOSMCRDEV;
1001	if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
1002		return SMC_CLC_DECL_NOSMCRDEV;
1003	return 0;
1004}
1005
1006/* check if there is an ISM device available for this connection. */
1007/* called for connect and listen */
1008static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
1009{
1010	/* Find ISM device with same PNETID as connecting interface */
1011	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
1012	if (!ini->ism_dev[0])
1013		return SMC_CLC_DECL_NOSMCDDEV;
1014	else
1015		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
1016	return 0;
1017}
1018
1019/* is chid unique for the ism devices that are already determined? */
1020static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
1021					   int cnt)
1022{
1023	int i = (!ini->ism_dev[0]) ? 1 : 0;
1024
1025	for (; i < cnt; i++)
1026		if (ini->ism_chid[i] == chid)
1027			return false;
1028	return true;
1029}
1030
1031/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
1032 * PNETID matching net_device)
1033 */
1034static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
1035				       struct smc_init_info *ini)
1036{
1037	int rc = SMC_CLC_DECL_NOSMCDDEV;
1038	struct smcd_dev *smcd;
1039	int i = 1, entry = 1;
1040	bool is_emulated;
1041	u16 chid;
1042
1043	if (smcd_indicated(ini->smc_type_v1))
1044		rc = 0;		/* already initialized for V1 */
1045	mutex_lock(&smcd_dev_list.mutex);
1046	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1047		if (smcd->going_away || smcd == ini->ism_dev[0])
1048			continue;
1049		chid = smc_ism_get_chid(smcd);
1050		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
1051			continue;
1052		is_emulated = __smc_ism_is_emulated(chid);
1053		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
1054		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
1055			if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
1056				/* It's the last GID-CHID entry left in CLC
1057				 * Proposal SMC-Dv2 extension, but an Emulated-
1058				 * ISM device would take two entries. So skip
1059				 * it and try the next potential ISM device.
1060				 */
1061				continue;
1062			ini->ism_dev[i] = smcd;
1063			ini->ism_chid[i] = chid;
1064			ini->is_smcd = true;
1065			rc = 0;
1066			i++;
1067			entry = is_emulated ? entry + 2 : entry + 1;
1068			if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
1069				break;
1070		}
1071	}
1072	mutex_unlock(&smcd_dev_list.mutex);
1073	ini->ism_offered_cnt = i - 1;
1074	if (!ini->ism_dev[0] && !ini->ism_dev[1])
1075		ini->smcd_version = 0;
1076
1077	return rc;
1078}
1079
1080/* Check for VLAN ID and register it on ISM device just for CLC handshake */
1081static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
1082				      struct smc_init_info *ini)
1083{
1084	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
1085		return SMC_CLC_DECL_ISMVLANERR;
1086	return 0;
1087}
1088
1089static int smc_find_proposal_devices(struct smc_sock *smc,
1090				     struct smc_init_info *ini)
1091{
1092	int rc = 0;
1093
1094	/* check if there is an ism device available */
1095	if (!(ini->smcd_version & SMC_V1) ||
1096	    smc_find_ism_device(smc, ini) ||
1097	    smc_connect_ism_vlan_setup(smc, ini))
1098		ini->smcd_version &= ~SMC_V1;
1099	/* else ISM V1 is supported for this connection */
1100
1101	/* check if there is an rdma device available */
1102	if (!(ini->smcr_version & SMC_V1) ||
1103	    smc_find_rdma_device(smc, ini))
1104		ini->smcr_version &= ~SMC_V1;
1105	/* else RDMA is supported for this connection */
1106
1107	ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
1108					      ini->smcr_version & SMC_V1);
1109
1110	/* check if there is an ism v2 device available */
1111	if (!(ini->smcd_version & SMC_V2) ||
1112	    !smc_ism_is_v2_capable() ||
1113	    smc_find_ism_v2_device_clnt(smc, ini))
1114		ini->smcd_version &= ~SMC_V2;
1115
1116	/* check if there is an rdma v2 device available */
1117	ini->check_smcrv2 = true;
1118	ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
1119	if (!(ini->smcr_version & SMC_V2) ||
1120	    smc->clcsock->sk->sk_family != AF_INET ||
1121	    !smc_clc_ueid_count() ||
1122	    smc_find_rdma_device(smc, ini))
1123		ini->smcr_version &= ~SMC_V2;
1124	ini->check_smcrv2 = false;
1125
1126	ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
1127					      ini->smcr_version & SMC_V2);
1128
1129	/* if neither ISM nor RDMA are supported, fallback */
1130	if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
1131		rc = SMC_CLC_DECL_NOSMCDEV;
1132
1133	return rc;
1134}
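/* Resulting proposal types, for reference (illustrative summary):
 *
 *	SMC_TYPE_B	both ISM and RDMA devices found
 *	SMC_TYPE_D	ISM (SMC-D) only
 *	SMC_TYPE_R	RDMA (SMC-R) only
 *	SMC_TYPE_N	neither - caller declines and falls back to TCP
 */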
1135
1136/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
1137 * used, the VLAN ID will be registered again during the connection setup.
1138 */
1139static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
1140					struct smc_init_info *ini)
1141{
1142	if (!smcd_indicated(ini->smc_type_v1))
1143		return 0;
1144	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
1145		return SMC_CLC_DECL_CNFERR;
1146	return 0;
1147}
1148
1149#define SMC_CLC_MAX_ACCEPT_LEN \
1150	(sizeof(struct smc_clc_msg_accept_confirm) + \
1151	 sizeof(struct smc_clc_first_contact_ext_v2x) + \
1152	 sizeof(struct smc_clc_msg_trail))
1153
1154/* CLC handshake during connect */
1155static int smc_connect_clc(struct smc_sock *smc,
1156			   struct smc_clc_msg_accept_confirm *aclc,
1157			   struct smc_init_info *ini)
1158{
1159	int rc = 0;
1160
1161	/* do inband token exchange */
1162	rc = smc_clc_send_proposal(smc, ini);
1163	if (rc)
1164		return rc;
1165	/* receive SMC Accept CLC message */
1166	return smc_clc_wait_msg(smc, aclc, SMC_CLC_MAX_ACCEPT_LEN,
1167				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
1168}
1169
1170void smc_fill_gid_list(struct smc_link_group *lgr,
1171		       struct smc_gidlist *gidlist,
1172		       struct smc_ib_device *known_dev, u8 *known_gid)
1173{
1174	struct smc_init_info *alt_ini = NULL;
1175
1176	memset(gidlist, 0, sizeof(*gidlist));
1177	memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);
1178
1179	alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
1180	if (!alt_ini)
1181		goto out;
1182
1183	alt_ini->vlan_id = lgr->vlan_id;
1184	alt_ini->check_smcrv2 = true;
1185	alt_ini->smcrv2.saddr = lgr->saddr;
1186	smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);
1187
1188	if (!alt_ini->smcrv2.ib_dev_v2)
1189		goto out;
1190
1191	memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
1192	       SMC_GID_SIZE);
1193
1194out:
1195	kfree(alt_ini);
1196}
1197
1198static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
1199				       struct smc_clc_msg_accept_confirm *aclc,
1200				       struct smc_init_info *ini)
1201{
1202	struct smc_clc_first_contact_ext *fce =
1203		smc_get_clc_first_contact_ext(aclc, false);
1204	struct net *net = sock_net(&smc->sk);
1205	int rc;
1206
1207	if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
1208		return 0;
1209
1210	if (fce->v2_direct) {
1211		memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
1212		ini->smcrv2.uses_gateway = false;
1213	} else {
1214		if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
1215				      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
1216				      ini->smcrv2.nexthop_mac,
1217				      &ini->smcrv2.uses_gateway))
1218			return SMC_CLC_DECL_NOROUTE;
1219		if (!ini->smcrv2.uses_gateway) {
1220			/* mismatch: peer claims indirect, but it's direct */
1221			return SMC_CLC_DECL_NOINDIRECT;
1222		}
1223	}
1224
1225	ini->release_nr = fce->release;
1226	rc = smc_clc_clnt_v2x_features_validate(fce, ini);
1227	if (rc)
1228		return rc;
1229
1230	return 0;
1231}
1232
1233/* setup for RDMA connection of client */
1234static int smc_connect_rdma(struct smc_sock *smc,
1235			    struct smc_clc_msg_accept_confirm *aclc,
1236			    struct smc_init_info *ini)
1237{
1238	int i, reason_code = 0;
1239	struct smc_link *link;
1240	u8 *eid = NULL;
1241
1242	ini->is_smcd = false;
1243	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
1244	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
1245	memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
1246	memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
1247	memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
1248	ini->max_conns = SMC_CONN_PER_LGR_MAX;
1249	ini->max_links = SMC_LINKS_ADD_LNK_MAX;
1250
1251	reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
1252	if (reason_code)
1253		return reason_code;
1254
1255	mutex_lock(&smc_client_lgr_pending);
1256	reason_code = smc_conn_create(smc, ini);
1257	if (reason_code) {
1258		mutex_unlock(&smc_client_lgr_pending);
1259		return reason_code;
1260	}
1261
1262	smc_conn_save_peer_info(smc, aclc);
1263
1264	if (ini->first_contact_local) {
1265		link = smc->conn.lnk;
1266	} else {
1267		/* set link that was assigned by server */
1268		link = NULL;
1269		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1270			struct smc_link *l = &smc->conn.lgr->lnk[i];
1271
1272			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
1273			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
1274				    SMC_GID_SIZE) &&
1275			    (aclc->hdr.version > SMC_V1 ||
1276			     !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
1277				     sizeof(l->peer_mac)))) {
1278				link = l;
1279				break;
1280			}
1281		}
1282		if (!link) {
1283			reason_code = SMC_CLC_DECL_NOSRVLINK;
1284			goto connect_abort;
1285		}
1286		smc_switch_link_and_count(&smc->conn, link);
1287	}
1288
1289	/* create send buffer and rmb */
1290	if (smc_buf_create(smc, false)) {
1291		reason_code = SMC_CLC_DECL_MEM;
1292		goto connect_abort;
1293	}
1294
1295	if (ini->first_contact_local)
1296		smc_link_save_peer_info(link, aclc, ini);
1297
1298	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
1299		reason_code = SMC_CLC_DECL_ERR_RTOK;
1300		goto connect_abort;
1301	}
1302
1303	smc_rx_init(smc);
1304
1305	if (ini->first_contact_local) {
1306		if (smc_ib_ready_link(link)) {
1307			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
1308			goto connect_abort;
1309		}
1310	} else {
1311		/* reg sendbufs if they were vzalloced */
1312		if (smc->conn.sndbuf_desc->is_vm) {
1313			if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) {
1314				reason_code = SMC_CLC_DECL_ERR_REGBUF;
1315				goto connect_abort;
1316			}
1317		}
1318		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
1319			reason_code = SMC_CLC_DECL_ERR_REGBUF;
1320			goto connect_abort;
1321		}
1322	}
1323
1324	if (aclc->hdr.version > SMC_V1) {
1325		eid = aclc->r1.eid;
1326		if (ini->first_contact_local)
1327			smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
1328					  link->smcibdev, link->gid);
1329	}
1330
1331	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
1332					   aclc->hdr.version, eid, ini);
1333	if (reason_code)
1334		goto connect_abort;
1335
1336	smc_tx_init(smc);
1337
1338	if (ini->first_contact_local) {
1339		/* QP confirmation over RoCE fabric */
1340		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
1341		reason_code = smcr_clnt_conf_first_link(smc);
1342		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
1343		if (reason_code)
1344			goto connect_abort;
1345	}
1346	mutex_unlock(&smc_client_lgr_pending);
1347
1348	smc_copy_sock_settings_to_clc(smc);
1349	smc->connect_nonblock = 0;
1350	if (smc->sk.sk_state == SMC_INIT)
1351		smc->sk.sk_state = SMC_ACTIVE;
1352
1353	return 0;
1354connect_abort:
1355	smc_conn_abort(smc, ini->first_contact_local);
1356	mutex_unlock(&smc_client_lgr_pending);
1357	smc->connect_nonblock = 0;
1358
1359	return reason_code;
1360}
1361
1362/* The server has chosen one of the proposed ISM devices for the communication.
1363 * Determine the chosen ISM device from the CHID in the received CLC ACCEPT.
1364 */
1365static int
1366smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm *aclc,
1367			       struct smc_init_info *ini)
1368{
1369	int i;
1370
1371	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
1372		if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
1373			ini->ism_selected = i;
1374			return 0;
1375		}
1376	}
1377
1378	return -EPROTO;
1379}
1380
1381/* setup for ISM connection of client */
1382static int smc_connect_ism(struct smc_sock *smc,
1383			   struct smc_clc_msg_accept_confirm *aclc,
1384			   struct smc_init_info *ini)
1385{
1386	u8 *eid = NULL;
1387	int rc = 0;
1388
1389	ini->is_smcd = true;
1390	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
1391
1392	if (aclc->hdr.version == SMC_V2) {
1393		if (ini->first_contact_peer) {
1394			struct smc_clc_first_contact_ext *fce =
1395				smc_get_clc_first_contact_ext(aclc, true);
1396
1397			ini->release_nr = fce->release;
1398			rc = smc_clc_clnt_v2x_features_validate(fce, ini);
1399			if (rc)
1400				return rc;
1401		}
1402
1403		rc = smc_v2_determine_accepted_chid(aclc, ini);
1404		if (rc)
1405			return rc;
1406
1407		if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
1408			ini->ism_peer_gid[ini->ism_selected].gid_ext =
1409						ntohll(aclc->d1.gid_ext);
1410		/* for non-Emulated-ISM devices, peer gid_ext remains 0. */
1411	}
1412	ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
1413
1414	/* there is only one lgr role for SMC-D; use server lock */
1415	mutex_lock(&smc_server_lgr_pending);
1416	rc = smc_conn_create(smc, ini);
1417	if (rc) {
1418		mutex_unlock(&smc_server_lgr_pending);
1419		return rc;
1420	}
1421
1422	/* Create send and receive buffers */
1423	rc = smc_buf_create(smc, true);
1424	if (rc) {
1425		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
1426		goto connect_abort;
1427	}
1428
1429	smc_conn_save_peer_info(smc, aclc);
1430
1431	if (smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd)) {
1432		rc = smcd_buf_attach(smc);
1433		if (rc) {
1434			rc = SMC_CLC_DECL_MEM;	/* try to fallback */
1435			goto connect_abort;
1436		}
1437	}
1438	smc_rx_init(smc);
1439	smc_tx_init(smc);
1440
1441	if (aclc->hdr.version > SMC_V1)
1442		eid = aclc->d1.eid;
1443
1444	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
1445				  aclc->hdr.version, eid, ini);
1446	if (rc)
1447		goto connect_abort;
1448	mutex_unlock(&smc_server_lgr_pending);
1449
1450	smc_copy_sock_settings_to_clc(smc);
1451	smc->connect_nonblock = 0;
1452	if (smc->sk.sk_state == SMC_INIT)
1453		smc->sk.sk_state = SMC_ACTIVE;
1454
1455	return 0;
1456connect_abort:
1457	smc_conn_abort(smc, ini->first_contact_local);
1458	mutex_unlock(&smc_server_lgr_pending);
1459	smc->connect_nonblock = 0;
1460
1461	return rc;
1462}
1463
1464/* check if received accept type and version matches a proposed one */
1465static int smc_connect_check_aclc(struct smc_init_info *ini,
1466				  struct smc_clc_msg_accept_confirm *aclc)
1467{
1468	if (aclc->hdr.version >= SMC_V2) {
1469		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1470		     !smcr_indicated(ini->smc_type_v2)) ||
1471		    (aclc->hdr.typev1 == SMC_TYPE_D &&
1472		     !smcd_indicated(ini->smc_type_v2)))
1473			return SMC_CLC_DECL_MODEUNSUPP;
1474	} else {
1475		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1476		     !smcr_indicated(ini->smc_type_v1)) ||
1477		    (aclc->hdr.typev1 == SMC_TYPE_D &&
1478		     !smcd_indicated(ini->smc_type_v1)))
1479			return SMC_CLC_DECL_MODEUNSUPP;
1480	}
1481
1482	return 0;
1483}
1484
1485/* perform steps before actually connecting */
1486static int __smc_connect(struct smc_sock *smc)
1487{
1488	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
1489	struct smc_clc_msg_accept_confirm *aclc;
1490	struct smc_init_info *ini = NULL;
1491	u8 *buf = NULL;
1492	int rc = 0;
1493
1494	if (smc->use_fallback)
1495		return smc_connect_fallback(smc, smc->fallback_rsn);
1496
1497	/* if peer has not signalled SMC-capability, fall back */
1498	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
1499		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
1500
1501	/* IPSec connections opt out of SMC optimizations */
1502	if (using_ipsec(smc))
1503		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
1504						    version);
1505
1506	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1507	if (!ini)
1508		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
1509						    version);
1510
1511	ini->smcd_version = SMC_V1 | SMC_V2;
1512	ini->smcr_version = SMC_V1 | SMC_V2;
1513	ini->smc_type_v1 = SMC_TYPE_B;
1514	ini->smc_type_v2 = SMC_TYPE_B;
1515
1516	/* get vlan id from IP device */
1517	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
1518		ini->smcd_version &= ~SMC_V1;
1519		ini->smcr_version = 0;
1520		ini->smc_type_v1 = SMC_TYPE_N;
1521	}
1522
1523	rc = smc_find_proposal_devices(smc, ini);
1524	if (rc)
1525		goto fallback;
1526
1527	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
1528	if (!buf) {
1529		rc = SMC_CLC_DECL_MEM;
1530		goto fallback;
1531	}
1532	aclc = (struct smc_clc_msg_accept_confirm *)buf;
1533
1534	/* perform CLC handshake */
1535	rc = smc_connect_clc(smc, aclc, ini);
1536	if (rc) {
1537		/* -EAGAIN on timeout, see tcp_recvmsg() */
1538		if (rc == -EAGAIN) {
1539			rc = -ETIMEDOUT;
1540			smc->sk.sk_err = ETIMEDOUT;
1541		}
1542		goto vlan_cleanup;
1543	}
1544
1545	/* check if smc modes and versions of CLC proposal and accept match */
1546	rc = smc_connect_check_aclc(ini, aclc);
1547	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
1548	if (rc)
1549		goto vlan_cleanup;
1550
1551	/* depending on previous steps, connect using rdma or ism */
1552	if (aclc->hdr.typev1 == SMC_TYPE_R) {
1553		ini->smcr_version = version;
1554		rc = smc_connect_rdma(smc, aclc, ini);
1555	} else if (aclc->hdr.typev1 == SMC_TYPE_D) {
1556		ini->smcd_version = version;
1557		rc = smc_connect_ism(smc, aclc, ini);
1558	}
1559	if (rc)
1560		goto vlan_cleanup;
1561
1562	SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
1563	smc_connect_ism_vlan_cleanup(smc, ini);
1564	kfree(buf);
1565	kfree(ini);
1566	return 0;
1567
1568vlan_cleanup:
1569	smc_connect_ism_vlan_cleanup(smc, ini);
1570	kfree(buf);
1571fallback:
1572	kfree(ini);
1573	return smc_connect_decline_fallback(smc, rc, version);
1574}
1575
1576static void smc_connect_work(struct work_struct *work)
1577{
1578	struct smc_sock *smc = container_of(work, struct smc_sock,
1579					    connect_work);
1580	long timeo = smc->sk.sk_sndtimeo;
1581	int rc = 0;
1582
1583	if (!timeo)
1584		timeo = MAX_SCHEDULE_TIMEOUT;
1585	lock_sock(smc->clcsock->sk);
1586	if (smc->clcsock->sk->sk_err) {
1587		smc->sk.sk_err = smc->clcsock->sk->sk_err;
1588	} else if ((1 << smc->clcsock->sk->sk_state) &
1589					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1590		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1591		if ((rc == -EPIPE) &&
1592		    ((1 << smc->clcsock->sk->sk_state) &
1593					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
1594			rc = 0;
1595	}
1596	release_sock(smc->clcsock->sk);
1597	lock_sock(&smc->sk);
1598	if (rc != 0 || smc->sk.sk_err) {
1599		smc->sk.sk_state = SMC_CLOSED;
1600		if (rc == -EPIPE || rc == -EAGAIN)
1601			smc->sk.sk_err = EPIPE;
1602		else if (rc == -ECONNREFUSED)
1603			smc->sk.sk_err = ECONNREFUSED;
1604		else if (signal_pending(current))
1605			smc->sk.sk_err = -sock_intr_errno(timeo);
1606		sock_put(&smc->sk); /* passive closing */
1607		goto out;
1608	}
1609
1610	rc = __smc_connect(smc);
1611	if (rc < 0)
1612		smc->sk.sk_err = -rc;
1613
1614out:
1615	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1616		if (smc->sk.sk_err) {
1617			smc->sk.sk_state_change(&smc->sk);
1618		} else { /* allow polling before and after fallback decision */
1619			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1620			smc->sk.sk_write_space(&smc->sk);
1621		}
1622	}
1623	release_sock(&smc->sk);
1624}
1625
1626int smc_connect(struct socket *sock, struct sockaddr *addr,
1627		int alen, int flags)
1628{
1629	struct sock *sk = sock->sk;
1630	struct smc_sock *smc;
1631	int rc = -EINVAL;
1632
1633	smc = smc_sk(sk);
1634
1635	/* separate smc parameter checking to be safe */
1636	if (alen < sizeof(addr->sa_family))
1637		goto out_err;
1638	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
1639		goto out_err;
1640
1641	lock_sock(sk);
1642	switch (sock->state) {
1643	default:
1644		rc = -EINVAL;
1645		goto out;
1646	case SS_CONNECTED:
1647		rc = sk->sk_state == SMC_ACTIVE ? -EISCONN : -EINVAL;
1648		goto out;
1649	case SS_CONNECTING:
1650		if (sk->sk_state == SMC_ACTIVE)
1651			goto connected;
1652		break;
1653	case SS_UNCONNECTED:
1654		sock->state = SS_CONNECTING;
1655		break;
1656	}
1657
1658	switch (sk->sk_state) {
1659	default:
1660		goto out;
1661	case SMC_CLOSED:
1662		rc = sock_error(sk) ? : -ECONNABORTED;
1663		sock->state = SS_UNCONNECTED;
1664		goto out;
1665	case SMC_ACTIVE:
1666		rc = -EISCONN;
1667		goto out;
1668	case SMC_INIT:
1669		break;
1670	}
1671
1672	smc_copy_sock_settings_to_clc(smc);
1673	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1674	if (smc->connect_nonblock) {
1675		rc = -EALREADY;
1676		goto out;
1677	}
1678	rc = kernel_connect(smc->clcsock, addr, alen, flags);
1679	if (rc && rc != -EINPROGRESS)
1680		goto out;
1681
1682	if (smc->use_fallback) {
1683		sock->state = rc ? SS_CONNECTING : SS_CONNECTED;
1684		goto out;
1685	}
1686	sock_hold(&smc->sk); /* sock put in passive closing */
1687	if (flags & O_NONBLOCK) {
1688		if (queue_work(smc_hs_wq, &smc->connect_work))
1689			smc->connect_nonblock = 1;
1690		rc = -EINPROGRESS;
1691		goto out;
1692	} else {
1693		rc = __smc_connect(smc);
1694		if (rc < 0)
1695			goto out;
1696	}
1697
1698connected:
1699	rc = 0;
1700	sock->state = SS_CONNECTED;
1701out:
1702	release_sock(sk);
1703out_err:
1704	return rc;
1705}
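/* Usage sketch (illustrative, user space): an AF_SMC socket is driven
 * exactly like TCP; constants come from the uapi headers, error handling
 * omitted:
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port = htons(12345) };
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * If the peer is not SMC-capable, the code above falls back to plain TCP
 * transparently.
 */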
1706
1707static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
1708{
1709	struct socket *new_clcsock = NULL;
1710	struct sock *lsk = &lsmc->sk;
1711	struct sock *new_sk;
1712	int rc = -EINVAL;
1713
1714	release_sock(lsk);
1715	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
1716	if (!new_sk) {
1717		rc = -ENOMEM;
1718		lsk->sk_err = ENOMEM;
1719		*new_smc = NULL;
1720		lock_sock(lsk);
1721		goto out;
1722	}
1723	*new_smc = smc_sk(new_sk);
1724
1725	mutex_lock(&lsmc->clcsock_release_lock);
1726	if (lsmc->clcsock)
1727		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
1728	mutex_unlock(&lsmc->clcsock_release_lock);
1729	lock_sock(lsk);
1730	if (rc < 0 && rc != -EAGAIN)
1731		lsk->sk_err = -rc;
1732	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
1733		new_sk->sk_prot->unhash(new_sk);
1734		if (new_clcsock)
1735			sock_release(new_clcsock);
1736		new_sk->sk_state = SMC_CLOSED;
1737		smc_sock_set_flag(new_sk, SOCK_DEAD);
1738		sock_put(new_sk); /* final */
1739		*new_smc = NULL;
1740		goto out;
1741	}
1742
1743	/* new clcsock has inherited the smc listen-specific sk_data_ready
1744	 * function; switch it back to the original sk_data_ready function
1745	 */
1746	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
1747
1748	/* if new clcsock has also inherited the fallback-specific callback
1749	 * functions, switch them back to the original ones.
1750	 */
1751	if (lsmc->use_fallback) {
1752		if (lsmc->clcsk_state_change)
1753			new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
1754		if (lsmc->clcsk_write_space)
1755			new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
1756		if (lsmc->clcsk_error_report)
1757			new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
1758	}
1759
1760	(*new_smc)->clcsock = new_clcsock;
1761out:
1762	return rc;
1763}
1764
1765/* add a just created sock to the accept queue of the listen sock as
1766 * a candidate for a subsequent accept call from user space
1767 */
1768static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
1769{
1770	struct smc_sock *par = smc_sk(parent);
1771
1772	sock_hold(sk); /* sock_put in smc_accept_unlink() */
1773	spin_lock(&par->accept_q_lock);
1774	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
1775	spin_unlock(&par->accept_q_lock);
1776	sk_acceptq_added(parent);
1777}
1778
1779/* remove a socket from the accept queue of its parent listening socket */
1780static void smc_accept_unlink(struct sock *sk)
1781{
1782	struct smc_sock *par = smc_sk(sk)->listen_smc;
1783
1784	spin_lock(&par->accept_q_lock);
1785	list_del_init(&smc_sk(sk)->accept_q);
1786	spin_unlock(&par->accept_q_lock);
1787	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
1788	sock_put(sk); /* sock_hold in smc_accept_enqueue */
1789}
1790
1791/* remove a sock from the accept queue to bind it to a new socket created
1792 * for a socket accept call from user space
1793 */
1794struct sock *smc_accept_dequeue(struct sock *parent,
1795				struct socket *new_sock)
1796{
1797	struct smc_sock *isk, *n;
1798	struct sock *new_sk;
1799
1800	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
1801		new_sk = (struct sock *)isk;
1802
1803		smc_accept_unlink(new_sk);
1804		if (new_sk->sk_state == SMC_CLOSED) {
1805			new_sk->sk_prot->unhash(new_sk);
1806			if (isk->clcsock) {
1807				sock_release(isk->clcsock);
1808				isk->clcsock = NULL;
1809			}
1810			sock_put(new_sk); /* final */
1811			continue;
1812		}
1813		if (new_sock) {
1814			sock_graft(new_sk, new_sock);
1815			new_sock->state = SS_CONNECTED;
1816			if (isk->use_fallback) {
1817				smc_sk(new_sk)->clcsock->file = new_sock->file;
1818				isk->clcsock->file->private_data = isk->clcsock;
1819			}
1820		}
1821		return new_sk;
1822	}
1823	return NULL;
1824}
1825
1826/* clean up for a created but never accepted sock */
1827void smc_close_non_accepted(struct sock *sk)
1828{
1829	struct smc_sock *smc = smc_sk(sk);
1830
1831	sock_hold(sk); /* sock_put below */
1832	lock_sock(sk);
1833	if (!sk->sk_lingertime)
1834		/* wait for peer closing */
1835		WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
1836	__smc_release(smc);
1837	release_sock(sk);
1838	sock_put(sk); /* sock_hold above */
1839	sock_put(sk); /* final sock_put */
1840}
1841
1842static int smcr_serv_conf_first_link(struct smc_sock *smc)
1843{
1844	struct smc_link *link = smc->conn.lnk;
1845	struct smc_llc_qentry *qentry;
1846	int rc;
1847
1848	/* reg the sndbuf if it was vzalloced */
1849	if (smc->conn.sndbuf_desc->is_vm) {
1850		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
1851			return SMC_CLC_DECL_ERR_REGBUF;
1852	}
1853
1854	/* reg the rmb */
1855	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
1856		return SMC_CLC_DECL_ERR_REGBUF;
1857
1858	/* send CONFIRM LINK request to client over the RoCE fabric */
1859	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
1860	if (rc < 0)
1861		return SMC_CLC_DECL_TIMEOUT_CL;
1862
1863	/* receive CONFIRM LINK response from client over the RoCE fabric */
1864	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
1865			      SMC_LLC_CONFIRM_LINK);
1866	if (!qentry) {
1867		struct smc_clc_msg_decline dclc;
1868
1869		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1870				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1871		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1872	}
1873	smc_llc_save_peer_uid(qentry);
1874	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
1875	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
1876	if (rc)
1877		return SMC_CLC_DECL_RMBE_EC;
1878
1879	/* confirm_rkey is implicit on 1st contact */
1880	smc->conn.rmb_desc->is_conf_rkey = true;
1881
1882	smc_llc_link_active(link);
1883	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
1884
1885	if (link->lgr->max_links > 1) {
1886		down_write(&link->lgr->llc_conf_mutex);
1887		/* initial contact - try to establish second link */
1888		smc_llc_srv_add_link(link, NULL);
1889		up_write(&link->lgr->llc_conf_mutex);
1890	}
1891	return 0;
1892}
1893
1894/* listen worker: finish */
1895static void smc_listen_out(struct smc_sock *new_smc)
1896{
1897	struct smc_sock *lsmc = new_smc->listen_smc;
1898	struct sock *newsmcsk = &new_smc->sk;
1899
1900	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
1901		atomic_dec(&lsmc->queued_smc_hs);
1902
1903	release_sock(newsmcsk); /* lock in smc_listen_work() */
1904	if (lsmc->sk.sk_state == SMC_LISTEN) {
1905		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1906		smc_accept_enqueue(&lsmc->sk, newsmcsk);
1907		release_sock(&lsmc->sk);
1908	} else { /* no longer listening */
1909		smc_close_non_accepted(newsmcsk);
1910	}
1911
1912	/* Wake up accept */
1913	lsmc->sk.sk_data_ready(&lsmc->sk);
1914	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1915}
1916
1917/* listen worker: finish in state connected */
1918static void smc_listen_out_connected(struct smc_sock *new_smc)
1919{
1920	struct sock *newsmcsk = &new_smc->sk;
1921
1922	if (newsmcsk->sk_state == SMC_INIT)
1923		newsmcsk->sk_state = SMC_ACTIVE;
1924
1925	smc_listen_out(new_smc);
1926}
1927
1928/* listen worker: finish in error state */
1929static void smc_listen_out_err(struct smc_sock *new_smc)
1930{
1931	struct sock *newsmcsk = &new_smc->sk;
1932	struct net *net = sock_net(newsmcsk);
1933
1934	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
1935	if (newsmcsk->sk_state == SMC_INIT)
1936		sock_put(&new_smc->sk); /* passive closing */
1937	newsmcsk->sk_state = SMC_CLOSED;
1938
1939	smc_listen_out(new_smc);
1940}
1941
1942/* listen worker: decline and fall back if possible */
1943static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1944			       int local_first, u8 version)
1945{
1946	/* RDMA setup failed, switch back to TCP */
1947	smc_conn_abort(new_smc, local_first);
1948	if (reason_code < 0 ||
1949	    smc_switch_to_fallback(new_smc, reason_code)) {
1950		/* error, no fallback possible */
1951		smc_listen_out_err(new_smc);
1952		return;
1953	}
1954	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1955		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
1956			smc_listen_out_err(new_smc);
1957			return;
1958		}
1959	}
1960	smc_listen_out_connected(new_smc);
1961}
1962
1963/* listen worker: version checking */
1964static int smc_listen_v2_check(struct smc_sock *new_smc,
1965			       struct smc_clc_msg_proposal *pclc,
1966			       struct smc_init_info *ini)
1967{
1968	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
1969	struct smc_clc_v2_extension *pclc_v2_ext;
1970	int rc = SMC_CLC_DECL_PEERNOSMC;
1971
1972	ini->smc_type_v1 = pclc->hdr.typev1;
1973	ini->smc_type_v2 = pclc->hdr.typev2;
1974	ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
1975	ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
1976	if (pclc->hdr.version > SMC_V1) {
1977		if (smcd_indicated(ini->smc_type_v2))
1978			ini->smcd_version |= SMC_V2;
1979		if (smcr_indicated(ini->smc_type_v2))
1980			ini->smcr_version |= SMC_V2;
1981	}
1982	if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
1983		rc = SMC_CLC_DECL_PEERNOSMC;
1984		goto out;
1985	}
1986	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
1987	if (!pclc_v2_ext) {
1988		ini->smcd_version &= ~SMC_V2;
1989		ini->smcr_version &= ~SMC_V2;
1990		rc = SMC_CLC_DECL_NOV2EXT;
1991		goto out;
1992	}
1993	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
1994	if (ini->smcd_version & SMC_V2) {
1995		if (!smc_ism_is_v2_capable()) {
1996			ini->smcd_version &= ~SMC_V2;
1997			rc = SMC_CLC_DECL_NOISM2SUPP;
1998		} else if (!pclc_smcd_v2_ext) {
1999			ini->smcd_version &= ~SMC_V2;
2000			rc = SMC_CLC_DECL_NOV2DEXT;
2001		} else if (!pclc_v2_ext->hdr.eid_cnt &&
2002			   !pclc_v2_ext->hdr.flag.seid) {
2003			ini->smcd_version &= ~SMC_V2;
2004			rc = SMC_CLC_DECL_NOUEID;
2005		}
2006	}
2007	if (ini->smcr_version & SMC_V2) {
2008		if (!pclc_v2_ext->hdr.eid_cnt) {
2009			ini->smcr_version &= ~SMC_V2;
2010			rc = SMC_CLC_DECL_NOUEID;
2011		}
2012	}
2013
2014	ini->release_nr = pclc_v2_ext->hdr.flag.release;
2015	if (pclc_v2_ext->hdr.flag.release > SMC_RELEASE)
2016		ini->release_nr = SMC_RELEASE;
2017
2018out:
2019	if (!ini->smcd_version && !ini->smcr_version)
2020		return rc;
2021
2022	return 0;
2023}
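
/* Note: smc_listen_v2_check() returns 0 as long as at least one variant
 * (SMC-D or SMC-R, V1 or V2) survived the checks above; the specific
 * decline reason collected in rc is only returned once every variant has
 * been ruled out.
 */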
2024
2025/* listen worker: check prefixes */
2026static int smc_listen_prfx_check(struct smc_sock *new_smc,
2027				 struct smc_clc_msg_proposal *pclc)
2028{
2029	struct smc_clc_msg_proposal_prefix *pclc_prfx;
2030	struct socket *newclcsock = new_smc->clcsock;
2031
2032	if (pclc->hdr.typev1 == SMC_TYPE_N)
2033		return 0;
2034	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
2035	if (!pclc_prfx)
2036		return -EPROTO;
2037	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
2038		return SMC_CLC_DECL_DIFFPREFIX;
2039
2040	return 0;
2041}
2042
2043/* listen worker: initialize connection and buffers */
2044static int smc_listen_rdma_init(struct smc_sock *new_smc,
2045				struct smc_init_info *ini)
2046{
2047	int rc;
2048
2049	/* allocate connection / link group */
2050	rc = smc_conn_create(new_smc, ini);
2051	if (rc)
2052		return rc;
2053
2054	/* create send buffer and rmb */
2055	if (smc_buf_create(new_smc, false)) {
2056		smc_conn_abort(new_smc, ini->first_contact_local);
2057		return SMC_CLC_DECL_MEM;
2058	}
2059
2060	return 0;
2061}
2062
2063/* listen worker: initialize connection and buffers for SMC-D */
2064static int smc_listen_ism_init(struct smc_sock *new_smc,
2065			       struct smc_init_info *ini)
2066{
2067	int rc;
2068
2069	rc = smc_conn_create(new_smc, ini);
2070	if (rc)
2071		return rc;
2072
2073	/* Create send and receive buffers */
2074	rc = smc_buf_create(new_smc, true);
2075	if (rc) {
2076		smc_conn_abort(new_smc, ini->first_contact_local);
2077		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
2078					 SMC_CLC_DECL_MEM;
2079	}
2080
2081	return 0;
2082}
2083
2084static bool smc_is_already_selected(struct smcd_dev *smcd,
2085				    struct smc_init_info *ini,
2086				    int matches)
2087{
2088	int i;
2089
2090	for (i = 0; i < matches; i++)
2091		if (smcd == ini->ism_dev[i])
2092			return true;
2093
2094	return false;
2095}
2096
2097/* check for ISM devices matching proposed ISM devices */
2098static void smc_check_ism_v2_match(struct smc_init_info *ini,
2099				   u16 proposed_chid,
2100				   struct smcd_gid *proposed_gid,
2101				   unsigned int *matches)
2102{
2103	struct smcd_dev *smcd;
2104
2105	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
2106		if (smcd->going_away)
2107			continue;
2108		if (smc_is_already_selected(smcd, ini, *matches))
2109			continue;
2110		if (smc_ism_get_chid(smcd) == proposed_chid &&
2111		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
2112			ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
2113			if (__smc_ism_is_emulated(proposed_chid))
2114				ini->ism_peer_gid[*matches].gid_ext =
2115							proposed_gid->gid_ext;
2116			/* a non-Emulated-ISM peer's gid_ext remains 0 */
2117			ini->ism_dev[*matches] = smcd;
2118			(*matches)++;
2119			break;
2120		}
2121	}
2122}
2123
2124static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
2125{
2126	if (!ini->rc)
2127		ini->rc = rc;
2128}
2129
2130static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
2131					struct smc_clc_msg_proposal *pclc,
2132					struct smc_init_info *ini)
2133{
2134	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
2135	struct smc_clc_v2_extension *smc_v2_ext;
2136	struct smc_clc_msg_smcd *pclc_smcd;
2137	unsigned int matches = 0;
2138	struct smcd_gid smcd_gid;
2139	u8 smcd_version;
2140	u8 *eid = NULL;
2141	int i, rc;
2142	u16 chid;
2143
2144	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
2145		goto not_found;
2146
2147	pclc_smcd = smc_get_clc_msg_smcd(pclc);
2148	smc_v2_ext = smc_get_clc_v2_ext(pclc);
2149	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
2150	if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
2151		goto not_found;
2152
2153	mutex_lock(&smcd_dev_list.mutex);
2154	if (pclc_smcd->ism.chid) {
2155		/* check for ISM device matching proposed native ISM device */
2156		smcd_gid.gid = ntohll(pclc_smcd->ism.gid);
2157		smcd_gid.gid_ext = 0;
2158		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
2159				       &smcd_gid, &matches);
2160	}
2161	for (i = 0; i < smc_v2_ext->hdr.ism_gid_cnt; i++) {
2162		/* check for ISM devices matching proposed non-native ISM
2163		 * devices
2164		 */
2165		smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
2166		smcd_gid.gid_ext = 0;
2167		chid = ntohs(smcd_v2_ext->gidchid[i].chid);
2168		if (__smc_ism_is_emulated(chid)) {
2169			if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
2170			    chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
2171				/* each Emulated-ISM device takes two GID-CHID
2172				 * entries, and the CHID of the second entry
2173				 * repeats that of the first entry.
2174				 *
2175				 * So check that the next GID-CHID entry exists
2176				 * and that both entries' CHIDs are the same.
2177				 */
2178				continue;
2179			smcd_gid.gid_ext =
2180				ntohll(smcd_v2_ext->gidchid[++i].gid);
2181		}
2182		smc_check_ism_v2_match(ini, chid, &smcd_gid, &matches);
2183	}
2184	mutex_unlock(&smcd_dev_list.mutex);
2185
2186	if (!ini->ism_dev[0]) {
2187		smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
2188		goto not_found;
2189	}
2190
2191	smc_ism_get_system_eid(&eid);
2192	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
2193			       smcd_v2_ext->system_eid, eid))
2194		goto not_found;
2195
2196	/* initialize the matches one by one - outside the smcd_dev_list mutex */
2197	smcd_version = ini->smcd_version;
2198	for (i = 0; i < matches; i++) {
2199		ini->smcd_version = SMC_V2;
2200		ini->is_smcd = true;
2201		ini->ism_selected = i;
2202		rc = smc_listen_ism_init(new_smc, ini);
2203		if (rc) {
2204			smc_find_ism_store_rc(rc, ini);
2205			/* try next active ISM device */
2206			continue;
2207		}
2208		return; /* matching and usable V2 ISM device found */
2209	}
2210	/* no V2 ISM device could be initialized */
2211	ini->smcd_version = smcd_version;	/* restore original value */
2212	ini->negotiated_eid[0] = 0;
2213
2214not_found:
2215	ini->smcd_version &= ~SMC_V2;
2216	ini->ism_dev[0] = NULL;
2217	ini->is_smcd = false;
2218}
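
/* GID-CHID array layout as parsed by the loop above: a native ISM device
 * occupies a single entry, while an Emulated-ISM device occupies two
 * consecutive entries with identical CHIDs:
 *	[ gid | chid ][ gid_ext | chid ]
 */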
2219
2220static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
2221					struct smc_clc_msg_proposal *pclc,
2222					struct smc_init_info *ini)
2223{
2224	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
2225	int rc = 0;
2226
2227	/* check if ISM V1 is available */
2228	if (!(ini->smcd_version & SMC_V1) ||
2229	    !smcd_indicated(ini->smc_type_v1) ||
2230	    !pclc_smcd)
2231		goto not_found;
2232	ini->is_smcd = true; /* prepare ISM check */
2233	ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
2234	ini->ism_peer_gid[0].gid_ext = 0;
2235	rc = smc_find_ism_device(new_smc, ini);
2236	if (rc)
2237		goto not_found;
2238	ini->ism_selected = 0;
2239	rc = smc_listen_ism_init(new_smc, ini);
2240	if (!rc)
2241		return;		/* V1 ISM device found */
2242
2243not_found:
2244	smc_find_ism_store_rc(rc, ini);
2245	ini->smcd_version &= ~SMC_V1;
2246	ini->ism_dev[0] = NULL;
2247	ini->is_smcd = false;
2248}
2249
2250/* listen worker: register buffers */
2251static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
2252{
2253	struct smc_connection *conn = &new_smc->conn;
2254
2255	if (!local_first) {
2256		/* reg sendbufs if they were vzalloced */
2257		if (conn->sndbuf_desc->is_vm) {
2258			if (smcr_lgr_reg_sndbufs(conn->lnk,
2259						 conn->sndbuf_desc))
2260				return SMC_CLC_DECL_ERR_REGBUF;
2261		}
2262		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
2263			return SMC_CLC_DECL_ERR_REGBUF;
2264	}
2265
2266	return 0;
2267}
2268
2269static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
2270					 struct smc_clc_msg_proposal *pclc,
2271					 struct smc_init_info *ini)
2272{
2273	struct smc_clc_v2_extension *smc_v2_ext;
2274	u8 smcr_version;
2275	int rc;
2276
2277	if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
2278		goto not_found;
2279
2280	smc_v2_ext = smc_get_clc_v2_ext(pclc);
2281	if (!smc_v2_ext ||
2282	    !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
2283		goto not_found;
2284
2285	/* prepare RDMA check */
2286	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
2287	memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
2288	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
2289	ini->check_smcrv2 = true;
2290	ini->smcrv2.clc_sk = new_smc->clcsock->sk;
2291	ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
2292	ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
2293	rc = smc_find_rdma_device(new_smc, ini);
2294	if (rc) {
2295		smc_find_ism_store_rc(rc, ini);
2296		goto not_found;
2297	}
2298	if (!ini->smcrv2.uses_gateway)
2299		memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);
2300
2301	smcr_version = ini->smcr_version;
2302	ini->smcr_version = SMC_V2;
2303	rc = smc_listen_rdma_init(new_smc, ini);
2304	if (!rc) {
2305		rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
2306		if (rc)
2307			smc_conn_abort(new_smc, ini->first_contact_local);
2308	}
2309	if (!rc)
2310		return;
2311	ini->smcr_version = smcr_version;
2312	smc_find_ism_store_rc(rc, ini);
2313
2314not_found:
2315	ini->smcr_version &= ~SMC_V2;
2316	ini->smcrv2.ib_dev_v2 = NULL;
2317	ini->check_smcrv2 = false;
2318}
2319
2320static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
2321					struct smc_clc_msg_proposal *pclc,
2322					struct smc_init_info *ini)
2323{
2324	int rc;
2325
2326	if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
2327		return SMC_CLC_DECL_NOSMCDEV;
2328
2329	/* prepare RDMA check */
2330	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
2331	memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
2332	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
2333	rc = smc_find_rdma_device(new_smc, ini);
2334	if (rc) {
2335		/* no RDMA device found */
2336		return SMC_CLC_DECL_NOSMCDEV;
2337	}
2338	rc = smc_listen_rdma_init(new_smc, ini);
2339	if (rc)
2340		return rc;
2341	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
2342}
2343
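/* Server-side device selection order, as implemented below: ISM V2, then
 * ISM V1, then RDMA V2, then RDMA V1. The first device that initializes
 * successfully is used; otherwise the first stored reason (ini->rc)
 * becomes the decline code.
 */
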
2344/* determine the local device matching the proposal */
2345static int smc_listen_find_device(struct smc_sock *new_smc,
2346				  struct smc_clc_msg_proposal *pclc,
2347				  struct smc_init_info *ini)
2348{
2349	int prfx_rc;
2350
2351	/* check for ISM device matching V2 proposed device */
2352	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
2353	if (ini->ism_dev[0])
2354		return 0;
2355
2356	/* check for matching IP prefix and subnet length (V1) */
2357	prfx_rc = smc_listen_prfx_check(new_smc, pclc);
2358	if (prfx_rc)
2359		smc_find_ism_store_rc(prfx_rc, ini);
2360
2361	/* get vlan id from IP device */
2362	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
2363		return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
2364
2365	/* check for ISM device matching V1 proposed device */
2366	if (!prfx_rc)
2367		smc_find_ism_v1_device_serv(new_smc, pclc, ini);
2368	if (ini->ism_dev[0])
2369		return 0;
2370
2371	if (!smcr_indicated(pclc->hdr.typev1) &&
2372	    !smcr_indicated(pclc->hdr.typev2))
2373		/* skip RDMA and decline */
2374		return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
2375
2376	/* check if RDMA V2 is available */
2377	smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
2378	if (ini->smcrv2.ib_dev_v2)
2379		return 0;
2380
2381	/* check if RDMA V1 is available */
2382	if (!prfx_rc) {
2383		int rc;
2384
2385		rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
2386		smc_find_ism_store_rc(rc, ini);
2387		return (!rc) ? 0 : ini->rc;
2388	}
2389	return prfx_rc;
2390}
2391
2392/* listen worker: finish RDMA setup */
2393static int smc_listen_rdma_finish(struct smc_sock *new_smc,
2394				  struct smc_clc_msg_accept_confirm *cclc,
2395				  bool local_first,
2396				  struct smc_init_info *ini)
2397{
2398	struct smc_link *link = new_smc->conn.lnk;
2399	int reason_code = 0;
2400
2401	if (local_first)
2402		smc_link_save_peer_info(link, cclc, ini);
2403
2404	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
2405		return SMC_CLC_DECL_ERR_RTOK;
2406
2407	if (local_first) {
2408		if (smc_ib_ready_link(link))
2409			return SMC_CLC_DECL_ERR_RDYLNK;
2410		/* QP confirmation over RoCE fabric */
2411		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
2412		reason_code = smcr_serv_conf_first_link(new_smc);
2413		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
2414	}
2415	return reason_code;
2416}
2417
2418/* set up the server side of a new connection */
2419static void smc_listen_work(struct work_struct *work)
2420{
2421	struct smc_sock *new_smc = container_of(work, struct smc_sock,
2422						smc_listen_work);
2423	struct socket *newclcsock = new_smc->clcsock;
2424	struct smc_clc_msg_accept_confirm *cclc;
2425	struct smc_clc_msg_proposal_area *buf;
2426	struct smc_clc_msg_proposal *pclc;
2427	struct smc_init_info *ini = NULL;
2428	u8 proposal_version = SMC_V1;
2429	u8 accept_version;
2430	int rc = 0;
2431
2432	lock_sock(&new_smc->sk); /* release in smc_listen_out() */
2433	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
2434		return smc_listen_out_err(new_smc);
2435
2436	if (new_smc->use_fallback) {
2437		smc_listen_out_connected(new_smc);
2438		return;
2439	}
2440
2441	/* check if peer is smc capable */
2442	if (!tcp_sk(newclcsock->sk)->syn_smc) {
2443		rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
2444		if (rc)
2445			smc_listen_out_err(new_smc);
2446		else
2447			smc_listen_out_connected(new_smc);
2448		return;
2449	}
2450
2451	/* do inband token exchange -
2452	 * wait for and receive SMC Proposal CLC message
2453	 */
2454	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
2455	if (!buf) {
2456		rc = SMC_CLC_DECL_MEM;
2457		goto out_decl;
2458	}
2459	pclc = (struct smc_clc_msg_proposal *)buf;
2460	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
2461			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
2462	if (rc)
2463		goto out_decl;
2464
2465	if (pclc->hdr.version > SMC_V1)
2466		proposal_version = SMC_V2;
2467
2468	/* IPSec connections opt out of SMC optimizations */
2469	if (using_ipsec(new_smc)) {
2470		rc = SMC_CLC_DECL_IPSEC;
2471		goto out_decl;
2472	}
2473
2474	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
2475	if (!ini) {
2476		rc = SMC_CLC_DECL_MEM;
2477		goto out_decl;
2478	}
2479
2480	/* initial version checking */
2481	rc = smc_listen_v2_check(new_smc, pclc, ini);
2482	if (rc)
2483		goto out_decl;
2484
2485	rc = smc_clc_srv_v2x_features_validate(new_smc, pclc, ini);
2486	if (rc)
2487		goto out_decl;
2488
2489	mutex_lock(&smc_server_lgr_pending);
2490	smc_rx_init(new_smc);
2491	smc_tx_init(new_smc);
2492
2493	/* determine ISM or RoCE device used for connection */
2494	rc = smc_listen_find_device(new_smc, pclc, ini);
2495	if (rc)
2496		goto out_unlock;
2497
2498	/* send SMC Accept CLC message */
2499	accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
2500	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
2501				 accept_version, ini->negotiated_eid, ini);
2502	if (rc)
2503		goto out_unlock;
2504
2505	/* SMC-D does not need this lock any more */
2506	if (ini->is_smcd)
2507		mutex_unlock(&smc_server_lgr_pending);
2508
2509	/* receive SMC Confirm CLC message */
2510	memset(buf, 0, sizeof(*buf));
2511	cclc = (struct smc_clc_msg_accept_confirm *)buf;
2512	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
2513			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
2514	if (rc) {
2515		if (!ini->is_smcd)
2516			goto out_unlock;
2517		goto out_decl;
2518	}
2519
2520	rc = smc_clc_v2x_features_confirm_check(cclc, ini);
2521	if (rc) {
2522		if (!ini->is_smcd)
2523			goto out_unlock;
2524		goto out_decl;
2525	}
2526
2527	/* the FCE SMC release version is needed in smc_listen_rdma_finish(),
2528	 * so save the FCE info here.
2529	 */
2530	smc_conn_save_peer_info_fce(new_smc, cclc);
2531
2532	/* finish worker */
2533	if (!ini->is_smcd) {
2534		rc = smc_listen_rdma_finish(new_smc, cclc,
2535					    ini->first_contact_local, ini);
2536		if (rc)
2537			goto out_unlock;
2538		mutex_unlock(&smc_server_lgr_pending);
2539	}
2540	smc_conn_save_peer_info(new_smc, cclc);
2541
2542	if (ini->is_smcd &&
2543	    smc_ism_support_dmb_nocopy(new_smc->conn.lgr->smcd)) {
2544		rc = smcd_buf_attach(new_smc);
2545		if (rc)
2546			goto out_decl;
2547	}
2548
2549	smc_listen_out_connected(new_smc);
2550	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
2551	goto out_free;
2552
2553out_unlock:
2554	mutex_unlock(&smc_server_lgr_pending);
2555out_decl:
2556	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
2557			   proposal_version);
2558out_free:
2559	kfree(ini);
2560	kfree(buf);
2561}
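
/* Server-side CLC handshake as implemented by smc_listen_work() above:
 * receive SMC Proposal, select a device, send SMC Accept, receive SMC
 * Confirm, then finish the RDMA setup (SMC-R only). smc_server_lgr_pending
 * serializes link group creation across concurrent handshakes; SMC-D can
 * drop that lock right after the Accept has been sent.
 */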
2562
2563static void smc_tcp_listen_work(struct work_struct *work)
2564{
2565	struct smc_sock *lsmc = container_of(work, struct smc_sock,
2566					     tcp_listen_work);
2567	struct sock *lsk = &lsmc->sk;
2568	struct smc_sock *new_smc;
2569	int rc = 0;
2570
2571	lock_sock(lsk);
2572	while (lsk->sk_state == SMC_LISTEN) {
2573		rc = smc_clcsock_accept(lsmc, &new_smc);
2574		if (rc) /* clcsock accept queue empty or error */
2575			goto out;
2576		if (!new_smc)
2577			continue;
2578
2579		if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
2580			atomic_inc(&lsmc->queued_smc_hs);
2581
2582		new_smc->listen_smc = lsmc;
2583		new_smc->use_fallback = lsmc->use_fallback;
2584		new_smc->fallback_rsn = lsmc->fallback_rsn;
2585		sock_hold(lsk); /* sock_put in smc_listen_work */
2586		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
2587		smc_copy_sock_settings_to_smc(new_smc);
2588		sock_hold(&new_smc->sk); /* sock_put in passive closing */
2589		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
2590			sock_put(&new_smc->sk);
2591	}
2592
2593out:
2594	release_sock(lsk);
2595	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
2596}
2597
2598static void smc_clcsock_data_ready(struct sock *listen_clcsock)
2599{
2600	struct smc_sock *lsmc;
2601
2602	read_lock_bh(&listen_clcsock->sk_callback_lock);
2603	lsmc = smc_clcsock_user_data(listen_clcsock);
2604	if (!lsmc)
2605		goto out;
2606	lsmc->clcsk_data_ready(listen_clcsock);
2607	if (lsmc->sk.sk_state == SMC_LISTEN) {
2608		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
2609		if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
2610			sock_put(&lsmc->sk);
2611	}
2612out:
2613	read_unlock_bh(&listen_clcsock->sk_callback_lock);
2614}
2615
2616int smc_listen(struct socket *sock, int backlog)
2617{
2618	struct sock *sk = sock->sk;
2619	struct smc_sock *smc;
2620	int rc;
2621
2622	smc = smc_sk(sk);
2623	lock_sock(sk);
2624
2625	rc = -EINVAL;
2626	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
2627	    smc->connect_nonblock || sock->state != SS_UNCONNECTED)
2628		goto out;
2629
2630	rc = 0;
2631	if (sk->sk_state == SMC_LISTEN) {
2632		sk->sk_max_ack_backlog = backlog;
2633		goto out;
2634	}
2635	/* some socket options are handled in core, so we cannot apply them
2636	 * to the clc socket -- copy the smc socket options to the clc socket
2637	 */
2638	smc_copy_sock_settings_to_clc(smc);
2639	if (!smc->use_fallback)
2640		tcp_sk(smc->clcsock->sk)->syn_smc = 1;
2641
2642	/* save original sk_data_ready function and establish
2643	 * smc-specific sk_data_ready function
2644	 */
2645	write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
2646	smc->clcsock->sk->sk_user_data =
2647		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
2648	smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
2649			       smc_clcsock_data_ready, &smc->clcsk_data_ready);
2650	write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
2651
2652	/* save original ops */
2653	smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
2654
2655	smc->af_ops = *smc->ori_af_ops;
2656	smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
2657
2658	inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
2659
2660	if (smc->limit_smc_hs)
2661		tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
2662
2663	rc = kernel_listen(smc->clcsock, backlog);
2664	if (rc) {
2665		write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
2666		smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
2667				       &smc->clcsk_data_ready);
2668		smc->clcsock->sk->sk_user_data = NULL;
2669		write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
2670		goto out;
2671	}
2672	sk->sk_max_ack_backlog = backlog;
2673	sk->sk_ack_backlog = 0;
2674	sk->sk_state = SMC_LISTEN;
2675
2676out:
2677	release_sock(sk);
2678	return rc;
2679}
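
/* Minimal userspace sketch (an illustrative assumption about typical
 * AF_SMC usage, not part of this file):
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(fd, 128);		// ends up in smc_listen() above
 *	int c = accept(fd, NULL, NULL);	// served by smc_accept() below
 */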
2680
2681int smc_accept(struct socket *sock, struct socket *new_sock,
2682	       struct proto_accept_arg *arg)
2683{
2684	struct sock *sk = sock->sk, *nsk;
2685	DECLARE_WAITQUEUE(wait, current);
2686	struct smc_sock *lsmc;
2687	long timeo;
2688	int rc = 0;
2689
2690	lsmc = smc_sk(sk);
2691	sock_hold(sk); /* sock_put below */
2692	lock_sock(sk);
2693
2694	if (lsmc->sk.sk_state != SMC_LISTEN) {
2695		rc = -EINVAL;
2696		release_sock(sk);
2697		goto out;
2698	}
2699
2700	/* Wait for an incoming connection */
2701	timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
2702	add_wait_queue_exclusive(sk_sleep(sk), &wait);
2703	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
2704		set_current_state(TASK_INTERRUPTIBLE);
2705		if (!timeo) {
2706			rc = -EAGAIN;
2707			break;
2708		}
2709		release_sock(sk);
2710		timeo = schedule_timeout(timeo);
2711		/* wakeup by sk_data_ready in smc_listen_work() */
2712		sched_annotate_sleep();
2713		lock_sock(sk);
2714		if (signal_pending(current)) {
2715			rc = sock_intr_errno(timeo);
2716			break;
2717		}
2718	}
2719	set_current_state(TASK_RUNNING);
2720	remove_wait_queue(sk_sleep(sk), &wait);
2721
2722	if (!rc)
2723		rc = sock_error(nsk);
2724	release_sock(sk);
2725	if (rc)
2726		goto out;
2727
2728	if (lsmc->sockopt_defer_accept && !(arg->flags & O_NONBLOCK)) {
2729		/* wait till data arrives on the socket */
2730		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
2731								MSEC_PER_SEC);
2732		if (smc_sk(nsk)->use_fallback) {
2733			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
2734
2735			lock_sock(clcsk);
2736			if (skb_queue_empty(&clcsk->sk_receive_queue))
2737				sk_wait_data(clcsk, &timeo, NULL);
2738			release_sock(clcsk);
2739		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
2740			lock_sock(nsk);
2741			smc_rx_wait(smc_sk(nsk), &timeo, 0, smc_rx_data_available);
2742			release_sock(nsk);
2743		}
2744	}
2745
2746out:
2747	sock_put(sk); /* sock_hold above */
2748	return rc;
2749}
2750
2751int smc_getname(struct socket *sock, struct sockaddr *addr,
2752		int peer)
2753{
2754	struct smc_sock *smc;
2755
2756	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
2757	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
2758		return -ENOTCONN;
2759
2760	smc = smc_sk(sock->sk);
2761
2762	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
2763}
2764
2765int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2766{
2767	struct sock *sk = sock->sk;
2768	struct smc_sock *smc;
2769	int rc;
2770
2771	smc = smc_sk(sk);
2772	lock_sock(sk);
2773
2774	/* SMC does not support connect with fastopen */
2775	if (msg->msg_flags & MSG_FASTOPEN) {
2776		/* not connected yet, fallback */
2777		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2778			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2779			if (rc)
2780				goto out;
2781		} else {
2782			rc = -EINVAL;
2783			goto out;
2784		}
2785	} else if ((sk->sk_state != SMC_ACTIVE) &&
2786		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2787		   (sk->sk_state != SMC_INIT)) {
2788		rc = -EPIPE;
2789		goto out;
2790	}
2791
2792	if (smc->use_fallback) {
2793		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2794	} else {
2795		rc = smc_tx_sendmsg(smc, msg, len);
2796		SMC_STAT_TX_PAYLOAD(smc, len, rc);
2797	}
2798out:
2799	release_sock(sk);
2800	return rc;
2801}
2802
2803int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2804		int flags)
2805{
2806	struct sock *sk = sock->sk;
2807	struct smc_sock *smc;
2808	int rc = -ENOTCONN;
2809
2810	smc = smc_sk(sk);
2811	lock_sock(sk);
2812	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2813		/* socket was connected before, no more data to read */
2814		rc = 0;
2815		goto out;
2816	}
2817	if ((sk->sk_state == SMC_INIT) ||
2818	    (sk->sk_state == SMC_LISTEN) ||
2819	    (sk->sk_state == SMC_CLOSED))
2820		goto out;
2821
2822	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2823		rc = 0;
2824		goto out;
2825	}
2826
2827	if (smc->use_fallback) {
2828		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2829	} else {
2830		msg->msg_namelen = 0;
2831		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2832		SMC_STAT_RX_PAYLOAD(smc, rc, rc);
2833	}
2834
2835out:
2836	release_sock(sk);
2837	return rc;
2838}
2839
2840static __poll_t smc_accept_poll(struct sock *parent)
2841{
2842	struct smc_sock *isk = smc_sk(parent);
2843	__poll_t mask = 0;
2844
2845	spin_lock(&isk->accept_q_lock);
2846	if (!list_empty(&isk->accept_q))
2847		mask = EPOLLIN | EPOLLRDNORM;
2848	spin_unlock(&isk->accept_q_lock);
2849
2850	return mask;
2851}
2852
2853__poll_t smc_poll(struct file *file, struct socket *sock,
2854		  poll_table *wait)
2855{
2856	struct sock *sk = sock->sk;
2857	struct smc_sock *smc;
2858	__poll_t mask = 0;
2859
2860	if (!sk)
2861		return EPOLLNVAL;
2862
2863	smc = smc_sk(sock->sk);
2864	if (smc->use_fallback) {
2865		/* delegate to CLC child sock */
2866		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2867		sk->sk_err = smc->clcsock->sk->sk_err;
2868	} else {
2869		if (sk->sk_state != SMC_CLOSED)
2870			sock_poll_wait(file, sock, wait);
2871		if (sk->sk_err)
2872			mask |= EPOLLERR;
2873		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
2874		    (sk->sk_state == SMC_CLOSED))
2875			mask |= EPOLLHUP;
2876		if (sk->sk_state == SMC_LISTEN) {
2877			/* woken up by sk_data_ready in smc_listen_work() */
2878			mask |= smc_accept_poll(sk);
2879		} else if (smc->use_fallback) { /* as result of connect_work() */
2880			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2881							   wait);
2882			sk->sk_err = smc->clcsock->sk->sk_err;
2883		} else {
2884			if ((sk->sk_state != SMC_INIT &&
2885			     atomic_read(&smc->conn.sndbuf_space)) ||
2886			    sk->sk_shutdown & SEND_SHUTDOWN) {
2887				mask |= EPOLLOUT | EPOLLWRNORM;
2888			} else {
2889				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2890				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2891
2892				if (sk->sk_state != SMC_INIT) {
2893					/* Race breaker the same way as tcp_poll(). */
2894					smp_mb__after_atomic();
2895					if (atomic_read(&smc->conn.sndbuf_space))
2896						mask |= EPOLLOUT | EPOLLWRNORM;
2897				}
2898			}
2899			if (atomic_read(&smc->conn.bytes_to_rcv))
2900				mask |= EPOLLIN | EPOLLRDNORM;
2901			if (sk->sk_shutdown & RCV_SHUTDOWN)
2902				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2903			if (sk->sk_state == SMC_APPCLOSEWAIT1)
2904				mask |= EPOLLIN;
2905			if (smc->conn.urg_state == SMC_URG_VALID)
2906				mask |= EPOLLPRI;
2907		}
2908	}
2909
2910	return mask;
2911}
2912
2913int smc_shutdown(struct socket *sock, int how)
2914{
2915	struct sock *sk = sock->sk;
2916	bool do_shutdown = true;
2917	struct smc_sock *smc;
2918	int rc = -EINVAL;
2919	int old_state;
2920	int rc1 = 0;
2921
2922	smc = smc_sk(sk);
2923
2924	if ((how < SHUT_RD) || (how > SHUT_RDWR))
2925		return rc;
2926
2927	lock_sock(sk);
2928
2929	if (sock->state == SS_CONNECTING) {
2930		if (sk->sk_state == SMC_ACTIVE)
2931			sock->state = SS_CONNECTED;
2932		else if (sk->sk_state == SMC_PEERCLOSEWAIT1 ||
2933			 sk->sk_state == SMC_PEERCLOSEWAIT2 ||
2934			 sk->sk_state == SMC_APPCLOSEWAIT1 ||
2935			 sk->sk_state == SMC_APPCLOSEWAIT2 ||
2936			 sk->sk_state == SMC_APPFINCLOSEWAIT)
2937			sock->state = SS_DISCONNECTING;
2938	}
2939
2940	rc = -ENOTCONN;
2941	if ((sk->sk_state != SMC_ACTIVE) &&
2942	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
2943	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
2944	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2945	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
2946	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
2947		goto out;
2948	if (smc->use_fallback) {
2949		rc = kernel_sock_shutdown(smc->clcsock, how);
2950		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2951		if (sk->sk_shutdown == SHUTDOWN_MASK) {
2952			sk->sk_state = SMC_CLOSED;
2953			sk->sk_socket->state = SS_UNCONNECTED;
2954			sock_put(sk);
2955		}
2956		goto out;
2957	}
2958	switch (how) {
2959	case SHUT_RDWR:		/* shutdown in both directions */
2960		old_state = sk->sk_state;
2961		rc = smc_close_active(smc);
2962		if (old_state == SMC_ACTIVE &&
2963		    sk->sk_state == SMC_PEERCLOSEWAIT1)
2964			do_shutdown = false;
2965		break;
2966	case SHUT_WR:
2967		rc = smc_close_shutdown_write(smc);
2968		break;
2969	case SHUT_RD:
2970		rc = 0;
2971		/* nothing more to do because peer is not involved */
2972		break;
2973	}
2974	if (do_shutdown && smc->clcsock)
2975		rc1 = kernel_sock_shutdown(smc->clcsock, how);
2976	/* map sock_shutdown_cmd constants to sk_shutdown value range */
2977	sk->sk_shutdown |= how + 1;
2978
2979	if (sk->sk_state == SMC_CLOSED)
2980		sock->state = SS_UNCONNECTED;
2981	else
2982		sock->state = SS_DISCONNECTING;
2983out:
2984	release_sock(sk);
2985	return rc ? rc : rc1;
2986}
2987
2988static int __smc_getsockopt(struct socket *sock, int level, int optname,
2989			    char __user *optval, int __user *optlen)
2990{
2991	struct smc_sock *smc;
2992	int val, len;
2993
2994	smc = smc_sk(sock->sk);
2995
2996	if (get_user(len, optlen))
2997		return -EFAULT;
2998
2999	len = min_t(int, len, sizeof(int));
3000
3001	if (len < 0)
3002		return -EINVAL;
3003
3004	switch (optname) {
3005	case SMC_LIMIT_HS:
3006		val = smc->limit_smc_hs;
3007		break;
3008	default:
3009		return -EOPNOTSUPP;
3010	}
3011
3012	if (put_user(len, optlen))
3013		return -EFAULT;
3014	if (copy_to_user(optval, &val, len))
3015		return -EFAULT;
3016
3017	return 0;
3018}
3019
3020static int __smc_setsockopt(struct socket *sock, int level, int optname,
3021			    sockptr_t optval, unsigned int optlen)
3022{
3023	struct sock *sk = sock->sk;
3024	struct smc_sock *smc;
3025	int val, rc;
3026
3027	smc = smc_sk(sk);
3028
3029	lock_sock(sk);
3030	switch (optname) {
3031	case SMC_LIMIT_HS:
3032		if (optlen < sizeof(int)) {
3033			rc = -EINVAL;
3034			break;
3035		}
3036		if (copy_from_sockptr(&val, optval, sizeof(int))) {
3037			rc = -EFAULT;
3038			break;
3039		}
3040
3041		smc->limit_smc_hs = !!val;
3042		rc = 0;
3043		break;
3044	default:
3045		rc = -EOPNOTSUPP;
3046		break;
3047	}
3048	release_sock(sk);
3049
3050	return rc;
3051}
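
/* Usage sketch for the SOL_SMC level handled above (assuming the uapi
 * constants exported through <linux/smc.h>):
 *
 *	int val = 1;
 *	setsockopt(fd, SOL_SMC, SMC_LIMIT_HS, &val, sizeof(val));
 *
 * With the limit enabled, a listen socket whose handshake queue is busy
 * lets new connections fall back to TCP instead of queueing more SMC
 * handshake work.
 */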
3052
3053int smc_setsockopt(struct socket *sock, int level, int optname,
3054		   sockptr_t optval, unsigned int optlen)
3055{
3056	struct sock *sk = sock->sk;
3057	struct smc_sock *smc;
3058	int val, rc;
3059
3060	if (level == SOL_TCP && optname == TCP_ULP)
3061		return -EOPNOTSUPP;
3062	else if (level == SOL_SMC)
3063		return __smc_setsockopt(sock, level, optname, optval, optlen);
3064
3065	smc = smc_sk(sk);
3066
3067	/* generic setsockopts reaching us here always apply to the
3068	 * CLC socket
3069	 */
3070	mutex_lock(&smc->clcsock_release_lock);
3071	if (!smc->clcsock) {
3072		mutex_unlock(&smc->clcsock_release_lock);
3073		return -EBADF;
3074	}
3075	if (unlikely(!smc->clcsock->ops->setsockopt))
3076		rc = -EOPNOTSUPP;
3077	else
3078		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
3079						   optval, optlen);
3080	if (smc->clcsock->sk->sk_err) {
3081		sk->sk_err = smc->clcsock->sk->sk_err;
3082		sk_error_report(sk);
3083	}
3084	mutex_unlock(&smc->clcsock_release_lock);
3085
3086	if (optlen < sizeof(int))
3087		return -EINVAL;
3088	if (copy_from_sockptr(&val, optval, sizeof(int)))
3089		return -EFAULT;
3090
3091	lock_sock(sk);
3092	if (rc || smc->use_fallback)
3093		goto out;
3094	switch (optname) {
3095	case TCP_FASTOPEN:
3096	case TCP_FASTOPEN_CONNECT:
3097	case TCP_FASTOPEN_KEY:
3098	case TCP_FASTOPEN_NO_COOKIE:
3099		/* option not supported by SMC */
3100		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
3101			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
3102		} else {
3103			rc = -EINVAL;
3104		}
3105		break;
3106	case TCP_NODELAY:
3107		if (sk->sk_state != SMC_INIT &&
3108		    sk->sk_state != SMC_LISTEN &&
3109		    sk->sk_state != SMC_CLOSED) {
3110			if (val) {
3111				SMC_STAT_INC(smc, ndly_cnt);
3112				smc_tx_pending(&smc->conn);
3113				cancel_delayed_work(&smc->conn.tx_work);
3114			}
3115		}
3116		break;
3117	case TCP_CORK:
3118		if (sk->sk_state != SMC_INIT &&
3119		    sk->sk_state != SMC_LISTEN &&
3120		    sk->sk_state != SMC_CLOSED) {
3121			if (!val) {
3122				SMC_STAT_INC(smc, cork_cnt);
3123				smc_tx_pending(&smc->conn);
3124				cancel_delayed_work(&smc->conn.tx_work);
3125			}
3126		}
3127		break;
3128	case TCP_DEFER_ACCEPT:
3129		smc->sockopt_defer_accept = val;
3130		break;
3131	default:
3132		break;
3133	}
3134out:
3135	release_sock(sk);
3136
3137	return rc;
3138}
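
/* Note: for TCP_NODELAY=1 and TCP_CORK=0 the handler above flushes any
 * pending SMC transmit data immediately via smc_tx_pending() instead of
 * waiting for the delayed tx worker, mirroring the TCP semantics of these
 * options.
 */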
3139
3140int smc_getsockopt(struct socket *sock, int level, int optname,
3141		   char __user *optval, int __user *optlen)
3142{
3143	struct smc_sock *smc;
3144	int rc;
3145
3146	if (level == SOL_SMC)
3147		return __smc_getsockopt(sock, level, optname, optval, optlen);
3148
3149	smc = smc_sk(sock->sk);
3150	mutex_lock(&smc->clcsock_release_lock);
3151	if (!smc->clcsock) {
3152		mutex_unlock(&smc->clcsock_release_lock);
3153		return -EBADF;
3154	}
3155	/* socket options apply to the CLC socket */
3156	if (unlikely(!smc->clcsock->ops->getsockopt)) {
3157		mutex_unlock(&smc->clcsock_release_lock);
3158		return -EOPNOTSUPP;
3159	}
3160	rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
3161					   optval, optlen);
3162	mutex_unlock(&smc->clcsock_release_lock);
3163	return rc;
3164}
3165
3166int smc_ioctl(struct socket *sock, unsigned int cmd,
3167	      unsigned long arg)
3168{
3169	union smc_host_cursor cons, urg;
3170	struct smc_connection *conn;
3171	struct smc_sock *smc;
3172	int answ;
3173
3174	smc = smc_sk(sock->sk);
3175	conn = &smc->conn;
3176	lock_sock(&smc->sk);
3177	if (smc->use_fallback) {
3178		if (!smc->clcsock) {
3179			release_sock(&smc->sk);
3180			return -EBADF;
3181		}
3182		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
3183		release_sock(&smc->sk);
3184		return answ;
3185	}
3186	switch (cmd) {
3187	case SIOCINQ: /* same as FIONREAD */
3188		if (smc->sk.sk_state == SMC_LISTEN) {
3189			release_sock(&smc->sk);
3190			return -EINVAL;
3191		}
3192		if (smc->sk.sk_state == SMC_INIT ||
3193		    smc->sk.sk_state == SMC_CLOSED)
3194			answ = 0;
3195		else
3196			answ = atomic_read(&smc->conn.bytes_to_rcv);
3197		break;
3198	case SIOCOUTQ:
3199		/* output queue size (not sent + not acked) */
3200		if (smc->sk.sk_state == SMC_LISTEN) {
3201			release_sock(&smc->sk);
3202			return -EINVAL;
3203		}
3204		if (smc->sk.sk_state == SMC_INIT ||
3205		    smc->sk.sk_state == SMC_CLOSED)
3206			answ = 0;
3207		else
3208			answ = smc->conn.sndbuf_desc->len -
3209					atomic_read(&smc->conn.sndbuf_space);
3210		break;
3211	case SIOCOUTQNSD:
3212		/* output queue size (not sent only) */
3213		if (smc->sk.sk_state == SMC_LISTEN) {
3214			release_sock(&smc->sk);
3215			return -EINVAL;
3216		}
3217		if (smc->sk.sk_state == SMC_INIT ||
3218		    smc->sk.sk_state == SMC_CLOSED)
3219			answ = 0;
3220		else
3221			answ = smc_tx_prepared_sends(&smc->conn);
3222		break;
3223	case SIOCATMARK:
3224		if (smc->sk.sk_state == SMC_LISTEN) {
3225			release_sock(&smc->sk);
3226			return -EINVAL;
3227		}
3228		if (smc->sk.sk_state == SMC_INIT ||
3229		    smc->sk.sk_state == SMC_CLOSED) {
3230			answ = 0;
3231		} else {
3232			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
3233			smc_curs_copy(&urg, &conn->urg_curs, conn);
3234			answ = smc_curs_diff(conn->rmb_desc->len,
3235					     &cons, &urg) == 1;
3236		}
3237		break;
3238	default:
3239		release_sock(&smc->sk);
3240		return -ENOIOCTLCMD;
3241	}
3242	release_sock(&smc->sk);
3243
3244	return put_user(answ, (int __user *)arg);
3245}
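
/* Note: SIOCATMARK above reports 1 when the next byte to be read is the
 * urgent byte, i.e. the consumer cursor trails the urgent cursor by
 * exactly one byte in the RMB ring of size rmb_desc->len.
 */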
3246
3247/* Map the affected portions of the rmbe into an spd, note the number of bytes
3248 * to splice in conn->splice_pending, and press 'go'. Consumer cursor updates
3249 * are delayed until the respective page has been fully processed.
3250 * Note that subsequent recv() calls have to wait until all splice() processing
3251 * has completed.
3252 */
3253ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
3254			struct pipe_inode_info *pipe, size_t len,
3255			unsigned int flags)
3256{
3257	struct sock *sk = sock->sk;
3258	struct smc_sock *smc;
3259	int rc = -ENOTCONN;
3260
3261	smc = smc_sk(sk);
3262	lock_sock(sk);
3263	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
3264		/* socket was connected before, no more data to read */
3265		rc = 0;
3266		goto out;
3267	}
3268	if (sk->sk_state == SMC_INIT ||
3269	    sk->sk_state == SMC_LISTEN ||
3270	    sk->sk_state == SMC_CLOSED)
3271		goto out;
3272
3273	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
3274		rc = 0;
3275		goto out;
3276	}
3277
3278	if (smc->use_fallback) {
3279		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
3280						    pipe, len, flags);
3281	} else {
3282		if (*ppos) {
3283			rc = -ESPIPE;
3284			goto out;
3285		}
3286		if (flags & SPLICE_F_NONBLOCK)
3287			flags = MSG_DONTWAIT;
3288		else
3289			flags = 0;
3290		SMC_STAT_INC(smc, splice_cnt);
3291		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
3292	}
3293out:
3294	release_sock(sk);
3295
3296	return rc;
3297}
3298
3299/* must look like tcp */
3300static const struct proto_ops smc_sock_ops = {
3301	.family		= PF_SMC,
3302	.owner		= THIS_MODULE,
3303	.release	= smc_release,
3304	.bind		= smc_bind,
3305	.connect	= smc_connect,
3306	.socketpair	= sock_no_socketpair,
3307	.accept		= smc_accept,
3308	.getname	= smc_getname,
3309	.poll		= smc_poll,
3310	.ioctl		= smc_ioctl,
3311	.listen		= smc_listen,
3312	.shutdown	= smc_shutdown,
3313	.setsockopt	= smc_setsockopt,
3314	.getsockopt	= smc_getsockopt,
3315	.sendmsg	= smc_sendmsg,
3316	.recvmsg	= smc_recvmsg,
3317	.mmap		= sock_no_mmap,
3318	.splice_read	= smc_splice_read,
3319};
3320
3321int smc_create_clcsk(struct net *net, struct sock *sk, int family)
3322{
3323	struct smc_sock *smc = smc_sk(sk);
3324	int rc;
3325
3326	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
3327			      &smc->clcsock);
3328	if (rc)
3329		return rc;
3330
3331	/* smc_clcsock_release() does not wait for smc->clcsock->sk's
3332	 * destruction; its sk_state might not be TCP_CLOSE after
3333	 * smc->sk is close()d, and TCP timers can fire later, which
3334	 * need the net reference.
3335	 */
3336	sk = smc->clcsock->sk;
3337	sk_net_refcnt_upgrade(sk);
3338	return 0;
3339}
3340
3341static int __smc_create(struct net *net, struct socket *sock, int protocol,
3342			int kern, struct socket *clcsock)
3343{
3344	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
3345	struct smc_sock *smc;
3346	struct sock *sk;
3347	int rc;
3348
3349	rc = -ESOCKTNOSUPPORT;
3350	if (sock->type != SOCK_STREAM)
3351		goto out;
3352
3353	rc = -EPROTONOSUPPORT;
3354	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
3355		goto out;
3356
3357	rc = -ENOBUFS;
3358	sock->ops = &smc_sock_ops;
3359	sock->state = SS_UNCONNECTED;
3360	sk = smc_sock_alloc(net, sock, protocol);
3361	if (!sk)
3362		goto out;
3363
3364	/* create internal TCP socket for CLC handshake and fallback */
3365	smc = smc_sk(sk);
3366
3367	rc = 0;
3368	if (clcsock)
3369		smc->clcsock = clcsock;
3370	else
3371		rc = smc_create_clcsk(net, sk, family);
3372
3373	if (rc) {
3374		sk_common_release(sk);
3375		sock->sk = NULL;
3376	}
3377out:
3378	return rc;
3379}
3380
3381static int smc_create(struct net *net, struct socket *sock, int protocol,
3382		      int kern)
3383{
3384	return __smc_create(net, sock, protocol, kern, NULL);
3385}
3386
3387static const struct net_proto_family smc_sock_family_ops = {
3388	.family	= PF_SMC,
3389	.owner	= THIS_MODULE,
3390	.create	= smc_create,
3391};
3392
3393static int smc_ulp_init(struct sock *sk)
3394{
3395	struct socket *tcp = sk->sk_socket;
3396	struct net *net = sock_net(sk);
3397	struct socket *smcsock;
3398	int protocol, ret;
3399
3400	/* only TCP can be replaced */
3401	if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
3402	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
3403		return -ESOCKTNOSUPPORT;
3404	/* don't handle wq now */
3405	if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
3406		return -ENOTCONN;
3407
3408	if (sk->sk_family == AF_INET)
3409		protocol = SMCPROTO_SMC;
3410	else
3411		protocol = SMCPROTO_SMC6;
3412
3413	smcsock = sock_alloc();
3414	if (!smcsock)
3415		return -ENFILE;
3416
3417	smcsock->type = SOCK_STREAM;
3418	__module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
3419	ret = __smc_create(net, smcsock, protocol, 1, tcp);
3420	if (ret) {
3421		sock_release(smcsock); /* does module_put(); ops is not NULL here */
3422		return ret;
3423	}
3424
3425	/* replace the tcp socket with the smc socket */
3426	smcsock->file = tcp->file;
3427	smcsock->file->private_data = smcsock;
3428	smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
3429	smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
3430	tcp->file = NULL;
3431
3432	return ret;
3433}
3434
3435static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
3436			  const gfp_t priority)
3437{
3438	struct inet_connection_sock *icsk = inet_csk(newsk);
3439
3440	/* don't inherit ULP ops to child sockets of a listening socket */
3441	icsk->icsk_ulp_ops = NULL;
3442}
3443
3444static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
3445	.name		= "smc",
3446	.owner		= THIS_MODULE,
3447	.init		= smc_ulp_init,
3448	.clone		= smc_ulp_clone,
3449};
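
/* Usage sketch for the "smc" ULP registered above (an illustrative
 * assumption about typical TCP_ULP usage, not part of this file):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "smc", sizeof("smc"));
 *
 * smc_ulp_init() then replaces the TCP socket with an SMC socket in
 * place, so the existing file descriptor now refers to an AF_SMC socket.
 */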
3450
3451unsigned int smc_net_id;
3452
3453static __net_init int smc_net_init(struct net *net)
3454{
3455	int rc;
3456
3457	rc = smc_sysctl_net_init(net);
3458	if (rc)
3459		return rc;
3460	return smc_pnet_net_init(net);
3461}
3462
3463static void __net_exit smc_net_exit(struct net *net)
3464{
3465	smc_sysctl_net_exit(net);
3466	smc_pnet_net_exit(net);
3467}
3468
3469static __net_init int smc_net_stat_init(struct net *net)
3470{
3471	return smc_stats_init(net);
3472}
3473
3474static void __net_exit smc_net_stat_exit(struct net *net)
3475{
3476	smc_stats_exit(net);
3477}
3478
3479static struct pernet_operations smc_net_ops = {
3480	.init = smc_net_init,
3481	.exit = smc_net_exit,
3482	.id   = &smc_net_id,
3483	.size = sizeof(struct smc_net),
3484};
3485
3486static struct pernet_operations smc_net_stat_ops = {
3487	.init = smc_net_stat_init,
3488	.exit = smc_net_stat_exit,
3489};
3490
3491static int __init smc_init(void)
3492{
3493	int rc;
3494
3495	rc = register_pernet_subsys(&smc_net_ops);
3496	if (rc)
3497		return rc;
3498
3499	rc = register_pernet_subsys(&smc_net_stat_ops);
3500	if (rc)
3501		goto out_pernet_subsys;
3502
3503	rc = smc_ism_init();
3504	if (rc)
3505		goto out_pernet_subsys_stat;
3506	smc_clc_init();
3507
3508	rc = smc_nl_init();
3509	if (rc)
3510		goto out_ism;
3511
3512	rc = smc_pnet_init();
3513	if (rc)
3514		goto out_nl;
3515
3516	rc = -ENOMEM;
3517
3518	smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
3519	if (!smc_tcp_ls_wq)
3520		goto out_pnet;
3521
3522	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
3523	if (!smc_hs_wq)
3524		goto out_alloc_tcp_ls_wq;
3525
3526	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
3527	if (!smc_close_wq)
3528		goto out_alloc_hs_wq;
3529
3530	rc = smc_core_init();
3531	if (rc) {
3532		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
3533		goto out_alloc_wqs;
3534	}
3535
3536	rc = smc_llc_init();
3537	if (rc) {
3538		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
3539		goto out_core;
3540	}
3541
3542	rc = smc_cdc_init();
3543	if (rc) {
3544		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
3545		goto out_core;
3546	}
3547
3548	rc = proto_register(&smc_proto, 1);
3549	if (rc) {
3550		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
3551		goto out_core;
3552	}
3553
3554	rc = proto_register(&smc_proto6, 1);
3555	if (rc) {
3556		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
3557		goto out_proto;
3558	}
3559
3560	rc = sock_register(&smc_sock_family_ops);
3561	if (rc) {
3562		pr_err("%s: sock_register fails with %d\n", __func__, rc);
3563		goto out_proto6;
3564	}
3565	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
3566	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);
3567
3568	rc = smc_ib_register_client();
3569	if (rc) {
3570		pr_err("%s: ib_register fails with %d\n", __func__, rc);
3571		goto out_sock;
3572	}
3573
3574	rc = smc_loopback_init();
3575	if (rc) {
3576		pr_err("%s: smc_loopback_init fails with %d\n", __func__, rc);
3577		goto out_ib;
3578	}
3579
3580	rc = tcp_register_ulp(&smc_ulp_ops);
3581	if (rc) {
3582		pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
3583		goto out_lo;
3584	}
3585	rc = smc_inet_init();
3586	if (rc) {
3587		pr_err("%s: smc_inet_init fails with %d\n", __func__, rc);
3588		goto out_ulp;
3589	}
3590	static_branch_enable(&tcp_have_smc);
3591	return 0;
3592out_ulp:
3593	tcp_unregister_ulp(&smc_ulp_ops);
3594out_lo:
3595	smc_loopback_exit();
3596out_ib:
3597	smc_ib_unregister_client();
3598out_sock:
3599	sock_unregister(PF_SMC);
3600out_proto6:
3601	proto_unregister(&smc_proto6);
3602out_proto:
3603	proto_unregister(&smc_proto);
3604out_core:
3605	smc_core_exit();
3606out_alloc_wqs:
3607	destroy_workqueue(smc_close_wq);
3608out_alloc_hs_wq:
3609	destroy_workqueue(smc_hs_wq);
3610out_alloc_tcp_ls_wq:
3611	destroy_workqueue(smc_tcp_ls_wq);
3612out_pnet:
3613	smc_pnet_exit();
3614out_nl:
3615	smc_nl_exit();
3616out_ism:
3617	smc_clc_exit();
3618	smc_ism_exit();
3619out_pernet_subsys_stat:
3620	unregister_pernet_subsys(&smc_net_stat_ops);
3621out_pernet_subsys:
3622	unregister_pernet_subsys(&smc_net_ops);
3623
3624	return rc;
3625}
3626
3627static void __exit smc_exit(void)
3628{
3629	static_branch_disable(&tcp_have_smc);
3630	smc_inet_exit();
3631	tcp_unregister_ulp(&smc_ulp_ops);
3632	sock_unregister(PF_SMC);
3633	smc_core_exit();
3634	smc_loopback_exit();
3635	smc_ib_unregister_client();
3636	smc_ism_exit();
3637	destroy_workqueue(smc_close_wq);
3638	destroy_workqueue(smc_tcp_ls_wq);
3639	destroy_workqueue(smc_hs_wq);
3640	proto_unregister(&smc_proto6);
3641	proto_unregister(&smc_proto);
3642	smc_pnet_exit();
3643	smc_nl_exit();
3644	smc_clc_exit();
3645	unregister_pernet_subsys(&smc_net_stat_ops);
3646	unregister_pernet_subsys(&smc_net_ops);
3647	rcu_barrier();
3648}
3649
3650module_init(smc_init);
3651module_exit(smc_exit);
3652
3653MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
3654MODULE_DESCRIPTION("smc socket address family");
3655MODULE_LICENSE("GPL");
3656MODULE_ALIAS_NETPROTO(PF_SMC);
3657MODULE_ALIAS_TCP_ULP("smc");
3658/* 256 for IPPROTO_SMC and 1 for SOCK_STREAM */
3659MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 256, 1);
3660#if IS_ENABLED(CONFIG_IPV6)
3661MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 256, 1);
3662#endif /* CONFIG_IPV6 */
3663MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);