// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  CLC (connection layer control) handshake over initial TCP socket to
 *  prepare for RDMA traffic
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/in.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/sched/signal.h>

#include <net/addrconf.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_ib.h"
#include "smc_ism.h"

#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48

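/* Each CLC message starts with a struct smc_clc_msg_hdr and ends with a
 * struct smc_clc_msg_trail; both carry an EBCDIC eyecatcher ("SMCR" or
 * "SMCD") so that smc_clc_msg_hdr_valid() can frame and sanity check the
 * data received on the TCP socket. The *_ACCEPT_CONFIRM_LEN defines above
 * are the fixed on-wire lengths of the accept/confirm message variants.
 */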
/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* eye catcher "SMCD" EBCDIC for CLC messages */
static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};

/* check if received message has a correct header length and contains valid
 * heading and trailing eyecatchers
 */
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_msg_accept_confirm *clc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_clc_msg_decline *dclc;
	struct smc_clc_msg_trail *trl;

	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	switch (clcm->type) {
	case SMC_CLC_PROPOSAL:
		if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
		    clcm->path != SMC_TYPE_B)
			return false;
		pclc = (struct smc_clc_msg_proposal *)clcm;
		pclc_prfx = smc_clc_proposal_get_prefix(pclc);
		if (ntohs(pclc->hdr.length) !=
			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
			sizeof(*pclc_prfx) +
			pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) +
			sizeof(*trl))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_ACCEPT:
	case SMC_CLC_CONFIRM:
		if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
			return false;
		clc = (struct smc_clc_msg_accept_confirm *)clcm;
		if ((clcm->path == SMC_TYPE_R &&
		     ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
		    (clcm->path == SMC_TYPE_D &&
		     ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_DECLINE:
		dclc = (struct smc_clc_msg_decline *)clcm;
		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
			return false;
		trl = &dclc->trl;
		break;
	default:
		return false;
	}
	if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	return true;
}

/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
				 struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (!inet_ifa_match(ipv4, ifa))
			continue;
		prop->prefix_len = inet_mask_len(ifa->ifa_mask);
		prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
		/* prop->ipv6_prefixes_cnt = 0; already done by memset before */
		return 0;
	}
	return -ENOENT;
}

/* fill CLC proposal msg with ipv6 prefixes from device */
static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
				 struct smc_clc_msg_proposal_prefix *prop,
				 struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
	struct inet6_ifaddr *ifa;
	int cnt = 0;

	if (!in6_dev)
		return -ENODEV;
	/* use a maximum of 8 IPv6 prefixes from device */
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
				 &ifa->addr, ifa->prefix_len);
		ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
		cnt++;
		if (cnt == SMC_CLC_MAX_V6_PREFIX)
			break;
	}
	prop->ipv6_prefixes_cnt = cnt;
	if (cnt)
		return 0;
#endif
	return -ENOENT;
}

/* retrieve and set prefixes in CLC proposal msg */
static int smc_clc_prfx_set(struct socket *clcsock,
			    struct smc_clc_msg_proposal_prefix *prop,
			    struct smc_clc_ipv6_prefix *ipv6_prfx)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct sockaddr_storage addrs;
	struct sockaddr_in6 *addr6;
	struct sockaddr_in *addr;
	int rc = -ENOENT;

	memset(prop, 0, sizeof(*prop));
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	/* get address to which the internal TCP socket is bound */
	kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
	/* analyze IP specific data of net_device belonging to TCP socket */
	addr6 = (struct sockaddr_in6 *)&addrs;
	rcu_read_lock();
	if (addrs.ss_family == PF_INET) {
		/* IPv4 */
		addr = (struct sockaddr_in *)&addrs;
		rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
	} else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
		/* mapped IPv4 address - peer is IPv4 only */
		rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
					   prop);
	} else {
		/* IPv6 */
		rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
	}
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}

/* match ipv4 addrs of dev against addr in CLC proposal */
static int smc_clc_prfx_match4_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;
	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
		    inet_ifa_match(prop->outgoing_subnet, ifa))
			return 0;
	}

	return -ENOENT;
}

/* match ipv6 addrs of dev against addrs in CLC proposal */
static int smc_clc_prfx_match6_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dev);
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct inet6_ifaddr *ifa;
	int i, max;

	if (!in6_dev)
		return -ENODEV;
	/* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
	ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
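	/* the peer-supplied prefix count is clamped so that at most
	 * SMC_CLC_MAX_V6_PREFIX entries are ever read from the buffer
	 */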
	max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		for (i = 0; i < max; i++) {
			if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
			    ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
					      ifa->prefix_len))
				return 0;
		}
	}
#endif
	return -ENOENT;
}

/* check if proposed prefixes match one of our device prefixes */
int smc_clc_prfx_match(struct socket *clcsock,
		       struct smc_clc_msg_proposal_prefix *prop)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	rcu_read_lock();
	if (!prop->ipv6_prefixes_cnt)
		rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
	else
		rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}

/* Wait for data on the tcp-socket, analyze received data
 * Returns:
 * 0 if success and it was not a decline that we received.
 * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send.
 * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
 */
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
		     u8 expected_type, unsigned long timeout)
{
	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
	struct sock *clc_sk = smc->clcsock->sk;
	struct smc_clc_msg_hdr *clcm = buf;
	struct msghdr msg = {NULL, 0};
	int reason_code = 0;
	struct kvec vec = {buf, buflen};
	int len, datlen;
	int krflags;

	/* peek the first few bytes to determine length of data to receive
	 * so we don't consume any subsequent CLC message or payload data
	 * in the TCP byte stream
	 */
	/*
	 * Caller must make sure that buflen is no less than
	 * sizeof(struct smc_clc_msg_hdr)
	 */
	krflags = MSG_PEEK | MSG_WAITALL;
	clc_sk->sk_rcvtimeo = timeout;
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
			sizeof(struct smc_clc_msg_hdr));
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (signal_pending(current)) {
		reason_code = -EINTR;
		clc_sk->sk_err = EINTR;
		smc->sk.sk_err = EINTR;
		goto out;
	}
	if (clc_sk->sk_err) {
		reason_code = -clc_sk->sk_err;
		if (clc_sk->sk_err == EAGAIN &&
		    expected_type == SMC_CLC_DECLINE)
			clc_sk->sk_err = 0; /* reset for fallback usage */
		else
			smc->sk.sk_err = clc_sk->sk_err;
		goto out;
	}
	if (!len) { /* peer has performed orderly shutdown */
		smc->sk.sk_err = ECONNRESET;
		reason_code = -ECONNRESET;
		goto out;
	}
	if (len < 0) {
		if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
			smc->sk.sk_err = -len;
		reason_code = len;
		goto out;
	}
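	/* sanity check the peeked header (length, version, path and type)
	 * before committing to read the complete message from the stream
	 */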
	datlen = ntohs(clcm->length);
	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
	    (datlen > buflen) ||
	    (clcm->version != SMC_CLC_V1) ||
	    (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
	     clcm->path != SMC_TYPE_B) ||
	    ((clcm->type != SMC_CLC_DECLINE) &&
	     (clcm->type != expected_type))) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}

	/* receive the complete CLC message */
	memset(&msg, 0, sizeof(struct msghdr));
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
	krflags = MSG_WAITALL;
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}
	if (clcm->type == SMC_CLC_DECLINE) {
		struct smc_clc_msg_decline *dclc;

		dclc = (struct smc_clc_msg_decline *)clcm;
		reason_code = SMC_CLC_DECL_PEERDECL;
		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
		if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
			smc->conn.lgr->sync_err = 1;
			smc_lgr_terminate(smc->conn.lgr);
		}
	}

out:
	clc_sk->sk_rcvtimeo = rcvtimeo;
	return reason_code;
}

/* send CLC DECLINE message across internal TCP socket */
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
{
	struct smc_clc_msg_decline dclc;
	struct msghdr msg;
	struct kvec vec;
	int len;

	memset(&dclc, 0, sizeof(dclc));
	memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	dclc.hdr.type = SMC_CLC_DECLINE;
	dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
	dclc.hdr.version = SMC_CLC_V1;
	dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
	memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
	dclc.peer_diagnosis = htonl(peer_diag_info);
	memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &dclc;
	vec.iov_len = sizeof(struct smc_clc_msg_decline);
	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
			     sizeof(struct smc_clc_msg_decline));
	if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
		len = -EPROTO;
	return len > 0 ? 0 : len;
}

/* send CLC PROPOSAL message across internal TCP socket */
int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
			  struct smc_init_info *ini)
{
	struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
	struct smc_clc_msg_proposal_prefix pclc_prfx;
	struct smc_clc_msg_smcd pclc_smcd;
	struct smc_clc_msg_proposal pclc;
	struct smc_clc_msg_trail trl;
	int len, i, plen, rc;
	int reason_code = 0;
	struct kvec vec[5];
	struct msghdr msg;

	/* retrieve ip prefixes for CLC proposal msg */
	rc = smc_clc_prfx_set(smc->clcsock, &pclc_prfx, ipv6_prfx);
	if (rc)
		return SMC_CLC_DECL_CNFERR; /* configuration error */

	/* send SMC Proposal CLC message */
	plen = sizeof(pclc) + sizeof(pclc_prfx) +
	       (pclc_prfx.ipv6_prefixes_cnt * sizeof(ipv6_prfx[0])) +
	       sizeof(trl);
	memset(&pclc, 0, sizeof(pclc));
	memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	pclc.hdr.type = SMC_CLC_PROPOSAL;
	pclc.hdr.version = SMC_CLC_V1;		/* SMC version */
	pclc.hdr.path = smc_type;
	if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
		/* add SMC-R specifics */
		memcpy(pclc.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&pclc.lcl.gid, ini->ib_gid, SMC_GID_SIZE);
		memcpy(&pclc.lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
		       ETH_ALEN);
		pclc.iparea_offset = htons(0);
	}
	if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
		/* add SMC-D specifics */
		memset(&pclc_smcd, 0, sizeof(pclc_smcd));
		plen += sizeof(pclc_smcd);
		pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
		pclc_smcd.gid = ini->ism_dev->local_gid;
	}
	pclc.hdr.length = htons(plen);

	memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	memset(&msg, 0, sizeof(msg));
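	/* assemble the proposal as a gather list matching the on-wire layout:
	 * fixed header (incl. SMC-R peer info), optional SMC-D area, prefix
	 * area, 0..SMC_CLC_MAX_V6_PREFIX IPv6 prefixes, trailing eyecatcher
	 */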
	i = 0;
	vec[i].iov_base = &pclc;
	vec[i++].iov_len = sizeof(pclc);
	if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
		vec[i].iov_base = &pclc_smcd;
		vec[i++].iov_len = sizeof(pclc_smcd);
	}
	vec[i].iov_base = &pclc_prfx;
	vec[i++].iov_len = sizeof(pclc_prfx);
	if (pclc_prfx.ipv6_prefixes_cnt > 0) {
		vec[i].iov_base = &ipv6_prfx[0];
		vec[i++].iov_len = pclc_prfx.ipv6_prefixes_cnt *
				   sizeof(ipv6_prfx[0]);
	}
	vec[i].iov_base = &trl;
	vec[i++].iov_len = sizeof(trl);
	/* due to the few bytes needed for clc-handshake this cannot block */
	len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
	if (len < 0) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		reason_code = -smc->sk.sk_err;
	} else if (len < (int)sizeof(pclc)) {
		reason_code = -ENETUNREACH;
		smc->sk.sk_err = -reason_code;
	}

	return reason_code;
}

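/* ACCEPT and CONFIRM share struct smc_clc_msg_accept_confirm on the wire:
 * the server answers a proposal with ACCEPT, the client completes the
 * handshake with CONFIRM, which is why the two send routines below mirror
 * each other.
 */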
/* send CLC CONFIRM message across internal TCP socket */
int smc_clc_send_confirm(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_link *link;
	int reason_code = 0;
	struct msghdr msg;
	struct kvec vec;
	int len;

	/* send SMC Confirm CLC msg */
	memset(&cclc, 0, sizeof(cclc));
	cclc.hdr.type = SMC_CLC_CONFIRM;
	cclc.hdr.version = SMC_CLC_V1;		/* SMC version */
	if (smc->conn.lgr->is_smcd) {
		/* SMC-D specific settings */
		memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
		cclc.hdr.path = SMC_TYPE_D;
		cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
		cclc.gid = conn->lgr->smcd->local_gid;
		cclc.token = conn->rmb_desc->token;
		cclc.dmbe_size = conn->rmbe_size_short;
		cclc.dmbe_idx = 0;
		memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
		memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
	} else {
		/* SMC-R specific settings */
		link = &conn->lgr->lnk[SMC_SINGLE_LINK];
		memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		cclc.hdr.path = SMC_TYPE_R;
		cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
		memcpy(cclc.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&cclc.lcl.gid, link->gid, SMC_GID_SIZE);
		memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
		       ETH_ALEN);
		hton24(cclc.qpn, link->roce_qp->qp_num);
		cclc.rmb_rkey =
			htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
		cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
		cclc.rmbe_alert_token = htonl(conn->alert_token_local);
		cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
		cclc.rmbe_size = conn->rmbe_size_short;
		cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
				(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
		hton24(cclc.psn, link->psn_initial);
		memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
	}

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &cclc;
	vec.iov_len = ntohs(cclc.hdr.length);
	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
			     ntohs(cclc.hdr.length));
	if (len < ntohs(cclc.hdr.length)) {
		if (len >= 0) {
			reason_code = -ENETUNREACH;
			smc->sk.sk_err = -reason_code;
		} else {
			smc->sk.sk_err = smc->clcsock->sk->sk_err;
			reason_code = -smc->sk.sk_err;
		}
	}
	return reason_code;
}

/* send CLC ACCEPT message across internal TCP socket */
int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
{
	struct smc_connection *conn = &new_smc->conn;
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_link *link;
	struct msghdr msg;
	struct kvec vec;
	int len;

	memset(&aclc, 0, sizeof(aclc));
	aclc.hdr.type = SMC_CLC_ACCEPT;
	aclc.hdr.version = SMC_CLC_V1;		/* SMC version */
	if (srv_first_contact)
		aclc.hdr.flag = 1;

	if (new_smc->conn.lgr->is_smcd) {
		/* SMC-D specific settings */
		aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
		memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
		aclc.hdr.path = SMC_TYPE_D;
		aclc.gid = conn->lgr->smcd->local_gid;
		aclc.token = conn->rmb_desc->token;
		aclc.dmbe_size = conn->rmbe_size_short;
		aclc.dmbe_idx = 0;
		memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
		memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
	} else {
		/* SMC-R specific settings */
		aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
		memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		aclc.hdr.path = SMC_TYPE_R;
		link = &conn->lgr->lnk[SMC_SINGLE_LINK];
		memcpy(aclc.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&aclc.lcl.gid, link->gid, SMC_GID_SIZE);
		memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
		       ETH_ALEN);
		hton24(aclc.qpn, link->roce_qp->qp_num);
		aclc.rmb_rkey =
			htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
		aclc.rmbe_idx = 1;		/* as long as 1 RMB = 1 RMBE */
		aclc.rmbe_alert_token = htonl(conn->alert_token_local);
		aclc.qp_mtu = link->path_mtu;
		aclc.rmbe_size = conn->rmbe_size_short;
		aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
				(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
		hton24(aclc.psn, link->psn_initial);
		memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
	}

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &aclc;
	vec.iov_len = ntohs(aclc.hdr.length);
	len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
			     ntohs(aclc.hdr.length));
	if (len < ntohs(aclc.hdr.length))
		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;

	return len > 0 ? 0 : len;
}