midcomms.c (Linux v4.6)
 
  1/******************************************************************************
  2*******************************************************************************
  3**
  4**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
  5**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  6**
  7**  This copyrighted material is made available to anyone wishing to use,
  8**  modify, copy, or redistribute it subject to the terms and conditions
  9**  of the GNU General Public License v.2.
 10**
 11*******************************************************************************
 12******************************************************************************/
 13
 14/*
 15 * midcomms.c
 16 *
 17 * This is the appallingly named "mid-level" comms layer.
 18 *
 19 * Its purpose is to take buffers from the "real" comms layer,
 20 * split them up into packets and pass them to the interested
 21 * part of the locking mechanism.
 22 *
 23 * It also takes messages from the locking layer, formats them
 24 * into packets and sends them to the comms layer.
 25 */
 26
 27#include "dlm_internal.h"
 28#include "lowcomms.h"
 29#include "config.h"
 30#include "lock.h"
 31#include "midcomms.h"
 32
 33
 34static void copy_from_cb(void *dst, const void *base, unsigned offset,
 35			 unsigned len, unsigned limit)
 36{
 37	unsigned copy = len;
 38
 39	if ((copy + offset) > limit)
 40		copy = limit - offset;
 41	memcpy(dst, base + offset, copy);
 42	len -= copy;
 43	if (len)
 44		memcpy(dst + copy, base, len);
 45}
 46
 47/*
 48 * Called from the low-level comms layer to process a buffer of
 49 * commands.
 50 *
 51 * Only complete messages are processed here; any "spare" bytes from
 52 * the end of a buffer are saved and tacked onto the front of the next
 53 * message that comes in. I doubt this will happen very often but we
 54 * need to be able to cope with it and I don't want the task to be waiting
 55 * for packets to come in when there is useful work to be done.
 56 */
 57
 58int dlm_process_incoming_buffer(int nodeid, const void *base,
 59				unsigned offset, unsigned len, unsigned limit)
 60{
 61	union {
 62		unsigned char __buf[DLM_INBUF_LEN];
 63		/* this is to force proper alignment on some arches */
 64		union dlm_packet p;
 65	} __tmp;
 66	union dlm_packet *p = &__tmp.p;
 67	int ret = 0;
 68	int err = 0;
 69	uint16_t msglen;
 70	uint32_t lockspace;
 71
 72	while (len > sizeof(struct dlm_header)) {
 73
 74		/* Copy just the header to check the total length.  The
 75		   message may wrap around the end of the buffer back to the
 76		   start, so we need to use a temp buffer and copy_from_cb. */
 77
 78		copy_from_cb(p, base, offset, sizeof(struct dlm_header),
 79			     limit);
 80
 81		msglen = le16_to_cpu(p->header.h_length);
 82		lockspace = p->header.h_lockspace;
 83
 84		err = -EINVAL;
 85		if (msglen < sizeof(struct dlm_header))
 86			break;
 87		if (p->header.h_cmd == DLM_MSG) {
 88			if (msglen < sizeof(struct dlm_message))
 89				break;
 90		} else {
 91			if (msglen < sizeof(struct dlm_rcom))
 92				break;
 93		}
 94		err = -E2BIG;
 95		if (msglen > dlm_config.ci_buffer_size) {
 96			log_print("message size %d from %d too big, buf len %d",
 97				  msglen, nodeid, len);
 98			break;
 99		}
100		err = 0;
101
102		/* If only part of the full message is contained in this
103		   buffer, then do nothing and wait for lowcomms to call
104		   us again later with more data.  We return 0 meaning
105		   we've consumed none of the input buffer. */
106
107		if (msglen > len)
108			break;
109
110		/* Allocate a larger temp buffer if the full message won't fit
111		   in the buffer on the stack (which should work for most
112		   ordinary messages). */
113
114		if (msglen > sizeof(__tmp) && p == &__tmp.p) {
115			p = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
116			if (p == NULL)
117				return ret;
118		}
119
120		copy_from_cb(p, base, offset, msglen, limit);
121
122		BUG_ON(lockspace != p->header.h_lockspace);
123
124		ret += msglen;
125		offset += msglen;
126		offset &= (limit - 1);
127		len -= msglen;
128
129		dlm_receive_buffer(p, nodeid);
130	}
131
132	if (p != &__tmp.p)
133		kfree(p);
134
135	return err ? err : ret;
136}
137
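A note on the wrap-around handling in the v4.6 code above: lowcomms hands dlm_process_incoming_buffer() an offset into a circular receive buffer, and copy_from_cb() linearizes a message that may wrap past the end of that buffer using at most two memcpy() calls; the caller then wraps the offset with offset &= (limit - 1), so limit is assumed to be a power of two. Below is a minimal stand-alone sketch of the same two-chunk copy, written as plain userspace C for illustration rather than kernel code.

#include <stdio.h>
#include <string.h>

/* Copy len bytes starting at offset inside a ring buffer of size limit
 * into a flat destination, handling the wrap back to the ring's start.
 */
static void copy_from_ring(void *dst, const void *base, unsigned int offset,
			   unsigned int len, unsigned int limit)
{
	unsigned int copy = len;

	/* first chunk: from offset up to the end of the ring */
	if ((copy + offset) > limit)
		copy = limit - offset;
	memcpy(dst, (const char *)base + offset, copy);

	/* second chunk: whatever wrapped around to the start of the ring */
	len -= copy;
	if (len)
		memcpy((char *)dst + copy, base, len);
}

int main(void)
{
	char ring[8] = { 'E', 'F', 'G', 'H', 'A', 'B', 'C', 'D' };
	char out[9] = { 0 };

	/* the 8-byte message "ABCDEFGH" starts at offset 4 and wraps to 0 */
	copy_from_ring(out, ring, 4, 8, sizeof(ring));
	printf("%s\n", out);	/* prints ABCDEFGH */
	return 0;
}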
midcomms.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/******************************************************************************
   3*******************************************************************************
   4**
   5**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
   6**  Copyright (C) 2004-2021 Red Hat, Inc.  All rights reserved.
   7**
   8**
   9*******************************************************************************
  10******************************************************************************/
  11
  12/*
  13 * midcomms.c
  14 *
  15 * This is the appallingly named "mid-level" comms layer. It takes care of
  16 * delivering application-layer "reliable" communication on top of the
  17 * lowcomms transport layer.
  18 *
  19 * How it works:
  20 *
  21 * Each node keeps track of all sent DLM messages in send_queue with a sequence
  22 * number. The receiver sends a DLM_ACK message back for every DLM message
  23 * received at the other side. If a reconnect happens in lowcomms we will send
  24 * all unacknowledged dlm messages again. The receiving side will drop any already
  25 * received message by comparing sequence numbers.
  26 *
  27 * How version detection works:
  28 *
  29 * Because dlm has pre-configured node addresses on every side, it is in its
  30 * nature that every side connects at start to transmit dlm messages, which
  31 * ends in a race. However, DLM_RCOM_NAMES, DLM_RCOM_STATUS and their replies
  32 * are the first messages which are exchanged. For backwards compatibility
  33 * these messages are not covered by the midcomms re-transmission layer; they
  34 * have their own re-transmission handling in the dlm application layer. The
  35 * version field of every node will be set from these RCOM messages as soon as
  36 * they arrive and the node isn't yet part of the nodes hash. There is also
  37 * logic to detect a version mismatch if something weird is going on or the
  38 * first message isn't an expected one.
  39 *
  40 * Termination:
  41 *
  42 * The midcomms layer does a 4-way handshake for termination at the DLM
  43 * protocol level, like TCP does with its half-closed socket support. SCTP
  44 * doesn't support half-closed sockets, so we do it at the DLM layer. Also,
  45 * socket shutdown() can be interrupted by e.g. a tcp reset itself. There is
  46 * also the othercon paradigm in lowcomms which cannot easily be removed
  47 * without breaking backwards compatibility. A node cannot send anything to
  48 * another node once a DLM_FIN message has been sent; there is additional
  49 * logic to print a warning if DLM tries to do it. The state handling is like
  50 * RFC 793 but reduced to termination only. The "member removal event" means
  51 * the cluster manager removed the node from internal lists; at this point
  52 * DLM does not send any message to the other node. There are two cases:
  53 *
  54 * 1. The cluster member was removed and we received a FIN
  55 * OR
  56 * 2. We received a FIN but the member was not removed yet
  57 *
  58 * One of these cases will do the CLOSE_WAIT to LAST_ACK change.
  59 *
  60 *
  61 *                              +---------+
  62 *                              | CLOSED  |
  63 *                              +---------+
  64 *                                   | add member/receive RCOM version
  65 *                                   |            detection msg
  66 *                                   V
  67 *                              +---------+
  68 *                              |  ESTAB  |
  69 *                              +---------+
  70 *                       CLOSE    |     |    rcv FIN
  71 *                      -------   |     |    -------
  72 * +---------+          snd FIN  /       \   snd ACK          +---------+
  73 * |  FIN    |<-----------------           ------------------>|  CLOSE  |
  74 * | WAIT-1  |------------------                              |   WAIT  |
  75 * +---------+          rcv FIN  \                            +---------+
  76 * | rcv ACK of FIN   -------   |                            CLOSE  | member
  77 * | --------------   snd ACK   |                           ------- | removal
  78 * V        x                   V                           snd FIN V event
  79 * +---------+                  +---------+                   +---------+
  80 * |FINWAIT-2|                  | CLOSING |                   | LAST-ACK|
  81 * +---------+                  +---------+                   +---------+
  82 * |                rcv ACK of FIN |                 rcv ACK of FIN |
  83 * |  rcv FIN       -------------- |                 -------------- |
  84 * |  -------              x       V                        x       V
  85 *  \ snd ACK                 +---------+                   +---------+
  86 *   ------------------------>| CLOSED  |                   | CLOSED  |
  87 *                            +---------+                   +---------+
  88 *
  89 * NOTE: any state can be interrupted by midcomms_close() and the state
  90 * will be switched to CLOSED in case of fencing. There is also some timeout
  91 * handling while we wait for the version detection RCOM messages; the
  92 * timeout value was chosen by observation.
  93 *
  94 * Future improvements:
  95 *
  96 * There are some known issues/improvements in the dlm handling. Some
  97 * of them should be done in a future major dlm version bump, which will
  98 * make it incompatible with previous versions.
  99 *
 100 * Unaligned memory access:
 101 *
 102 * There are cases where the dlm message buffer length is not aligned
 103 * to 8 bytes. However, nobody seems to have detected any problem with it.
 104 * This can be fixed in the next major version bump of dlm.
 105 *
 106 * Version detection:
 107 *
 108 * The version detection and how it's done is constrained by backwards
 109 * compatibility. There are better ways to handle it; however, this
 110 * should be changed in the next major version bump of dlm.
 111 *
 112 * Tail Size checking:
 113 *
 114 * There is a message tail payload in e.g. DLM_MSG; however, we don't
 115 * check it against the message length and the receive buffer length
 116 * yet. That needs to be validated.
 117 *
 118 * Fencing bad nodes:
 119 *
 120 * On timeouts or weird sequence number behaviour we should send
 121 * a fencing request to the cluster manager.
 122 */
 123
 124/* Debug switch to enable a 5 second sleep while waiting for termination.
 125 * This can be useful to test fencing while termination is running.
 126 * This requires a setup with only gfs2 as dlm user, so that the
 127 * last umount will terminate the connection.
 128 *
 129 * To test, press the reset button during the 5 second block in umount.
 130 * With a lot of packet loss the termination process can take
 131 * several seconds.
 132 */
 133#define DLM_DEBUG_FENCE_TERMINATION	0
 134
 135#include <trace/events/dlm.h>
 136#include <net/tcp.h>
 137
 138#include "dlm_internal.h"
 139#include "lowcomms.h"
 140#include "config.h"
 141#include "memory.h"
 142#include "lock.h"
 143#include "util.h"
 144#include "midcomms.h"
 145
 146/* init value for sequence numbers, for testing purposes only, e.g. overflows */
 147#define DLM_SEQ_INIT		0
 148/* 5 seconds wait to sync ending of dlm */
 149#define DLM_SHUTDOWN_TIMEOUT	msecs_to_jiffies(5000)
 150#define DLM_VERSION_NOT_SET	0
 151#define DLM_SEND_ACK_BACK_MSG_THRESHOLD 32
 152#define DLM_RECV_ACK_BACK_MSG_THRESHOLD (DLM_SEND_ACK_BACK_MSG_THRESHOLD * 8)
 153
 154struct midcomms_node {
 155	int nodeid;
 156	uint32_t version;
 157	atomic_t seq_send;
 158	atomic_t seq_next;
 159	/* These queues are unbounded because we cannot drop any message in dlm.
 160	 * We could send a fence signal for a specific node to the cluster
 161	 * manager if the queue hits some maximum value; however, this handling
 162	 * is not supported yet.
 163	 */
 164	struct list_head send_queue;
 165	spinlock_t send_queue_lock;
 166	atomic_t send_queue_cnt;
 167#define DLM_NODE_FLAG_CLOSE	1
 168#define DLM_NODE_FLAG_STOP_TX	2
 169#define DLM_NODE_FLAG_STOP_RX	3
 170	atomic_t ulp_delivered;
 171	unsigned long flags;
 172	wait_queue_head_t shutdown_wait;
 173
 174	/* dlm tcp termination state */
 175#define DLM_CLOSED	1
 176#define DLM_ESTABLISHED	2
 177#define DLM_FIN_WAIT1	3
 178#define DLM_FIN_WAIT2	4
 179#define DLM_CLOSE_WAIT	5
 180#define DLM_LAST_ACK	6
 181#define DLM_CLOSING	7
 182	int state;
 183	spinlock_t state_lock;
 184
 185	/* counts how many lockspaces are using this node
 186	 * this refcount is necessary to determine if the
 187	 * node wants to disconnect.
 188	 */
 189	int users;
 190
 191	/* not protected by srcu, node_hash lifetime */
 192	void *debugfs;
 193
 194	struct hlist_node hlist;
 195	struct rcu_head rcu;
 196};
 197
 198struct dlm_mhandle {
 199	const union dlm_packet *inner_p;
 200	struct midcomms_node *node;
 201	struct dlm_opts *opts;
 202	struct dlm_msg *msg;
 203	bool committed;
 204	uint32_t seq;
 205
 206	void (*ack_rcv)(struct midcomms_node *node);
 207
 208	/* get_mhandle/commit srcu idx exchange */
 209	int idx;
 210
 211	struct list_head list;
 212	struct rcu_head rcu;
 213};
 214
 215static struct hlist_head node_hash[CONN_HASH_SIZE];
 216static DEFINE_SPINLOCK(nodes_lock);
 217DEFINE_STATIC_SRCU(nodes_srcu);
 218
 219/* This mutex prevents midcomms_close() from running while
 220 * stop() or remove() is running. I experienced invalid memory access
 221 * behaviour when DLM_DEBUG_FENCE_TERMINATION is enabled and machines
 222 * are reset; it ends in a double deletion in the nodes
 223 * datastructure.
 224 */
 225static DEFINE_MUTEX(close_lock);
 226
 227struct kmem_cache *dlm_midcomms_cache_create(void)
 228{
 229	return KMEM_CACHE(dlm_mhandle, 0);
 230}
 231
 232static inline const char *dlm_state_str(int state)
 233{
 234	switch (state) {
 235	case DLM_CLOSED:
 236		return "CLOSED";
 237	case DLM_ESTABLISHED:
 238		return "ESTABLISHED";
 239	case DLM_FIN_WAIT1:
 240		return "FIN_WAIT1";
 241	case DLM_FIN_WAIT2:
 242		return "FIN_WAIT2";
 243	case DLM_CLOSE_WAIT:
 244		return "CLOSE_WAIT";
 245	case DLM_LAST_ACK:
 246		return "LAST_ACK";
 247	case DLM_CLOSING:
 248		return "CLOSING";
 249	default:
 250		return "UNKNOWN";
 251	}
 252}
 253
 254const char *dlm_midcomms_state(struct midcomms_node *node)
 255{
 256	return dlm_state_str(node->state);
 257}
 258
 259unsigned long dlm_midcomms_flags(struct midcomms_node *node)
 260{
 261	return node->flags;
 262}
 263
 264int dlm_midcomms_send_queue_cnt(struct midcomms_node *node)
 265{
 266	return atomic_read(&node->send_queue_cnt);
 267}
 268
 269uint32_t dlm_midcomms_version(struct midcomms_node *node)
 270{
 271	return node->version;
 272}
 273
 274static struct midcomms_node *__find_node(int nodeid, int r)
 275{
 276	struct midcomms_node *node;
 277
 278	hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
 279		if (node->nodeid == nodeid)
 280			return node;
 281	}
 282
 283	return NULL;
 284}
 285
 286static void dlm_mhandle_release(struct rcu_head *rcu)
 287{
 288	struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu);
 289
 290	dlm_lowcomms_put_msg(mh->msg);
 291	dlm_free_mhandle(mh);
 292}
 293
 294static void dlm_mhandle_delete(struct midcomms_node *node,
 295			       struct dlm_mhandle *mh)
 296{
 297	list_del_rcu(&mh->list);
 298	atomic_dec(&node->send_queue_cnt);
 299	call_rcu(&mh->rcu, dlm_mhandle_release);
 300}
 301
 302static void dlm_send_queue_flush(struct midcomms_node *node)
 303{
 304	struct dlm_mhandle *mh;
 305
 306	pr_debug("flush midcomms send queue of node %d\n", node->nodeid);
 307
 308	rcu_read_lock();
 309	spin_lock_bh(&node->send_queue_lock);
 310	list_for_each_entry_rcu(mh, &node->send_queue, list) {
 311		dlm_mhandle_delete(node, mh);
 312	}
 313	spin_unlock_bh(&node->send_queue_lock);
 314	rcu_read_unlock();
 315}
 316
 317static void midcomms_node_reset(struct midcomms_node *node)
 318{
 319	pr_debug("reset node %d\n", node->nodeid);
 320
 321	atomic_set(&node->seq_next, DLM_SEQ_INIT);
 322	atomic_set(&node->seq_send, DLM_SEQ_INIT);
 323	atomic_set(&node->ulp_delivered, 0);
 324	node->version = DLM_VERSION_NOT_SET;
 325	node->flags = 0;
 326
 327	dlm_send_queue_flush(node);
 328	node->state = DLM_CLOSED;
 329	wake_up(&node->shutdown_wait);
 330}
 331
 332static struct midcomms_node *nodeid2node(int nodeid)
 333{
 334	return __find_node(nodeid, nodeid_hash(nodeid));
 335}
 336
 337int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr)
 338{
 339	int ret, idx, r = nodeid_hash(nodeid);
 340	struct midcomms_node *node;
 341
 342	ret = dlm_lowcomms_addr(nodeid, addr);
 343	if (ret)
 344		return ret;
 345
 346	idx = srcu_read_lock(&nodes_srcu);
 347	node = __find_node(nodeid, r);
 348	if (node) {
 349		srcu_read_unlock(&nodes_srcu, idx);
 350		return 0;
 351	}
 352	srcu_read_unlock(&nodes_srcu, idx);
 353
 354	node = kmalloc(sizeof(*node), GFP_NOFS);
 355	if (!node)
 356		return -ENOMEM;
 357
 358	node->nodeid = nodeid;
 359	spin_lock_init(&node->state_lock);
 360	spin_lock_init(&node->send_queue_lock);
 361	atomic_set(&node->send_queue_cnt, 0);
 362	INIT_LIST_HEAD(&node->send_queue);
 363	init_waitqueue_head(&node->shutdown_wait);
 364	node->users = 0;
 365	midcomms_node_reset(node);
 366
 367	spin_lock_bh(&nodes_lock);
 368	hlist_add_head_rcu(&node->hlist, &node_hash[r]);
 369	spin_unlock_bh(&nodes_lock);
 370
 371	node->debugfs = dlm_create_debug_comms_file(nodeid, node);
 372	return 0;
 373}
 374
 375static int dlm_send_ack(int nodeid, uint32_t seq)
 376{
 377	int mb_len = sizeof(struct dlm_header);
 378	struct dlm_header *m_header;
 379	struct dlm_msg *msg;
 380	char *ppc;
 381
 382	msg = dlm_lowcomms_new_msg(nodeid, mb_len, &ppc, NULL, NULL);
 383	if (!msg)
 384		return -ENOMEM;
 385
 386	m_header = (struct dlm_header *)ppc;
 387
 388	m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
 389	m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
 390	m_header->h_length = cpu_to_le16(mb_len);
 391	m_header->h_cmd = DLM_ACK;
 392	m_header->u.h_seq = cpu_to_le32(seq);
 393
 394	dlm_lowcomms_commit_msg(msg);
 395	dlm_lowcomms_put_msg(msg);
 396
 397	return 0;
 398}
 399
 400static void dlm_send_ack_threshold(struct midcomms_node *node,
 401				   uint32_t threshold)
 402{
 403	uint32_t oval, nval;
 404	bool send_ack;
 405
 406	/* let only one user trigger the threshold to send an ack back */
 407	do {
 408		oval = atomic_read(&node->ulp_delivered);
 409		send_ack = (oval > threshold);
 410		/* abort if threshold is not reached */
 411		if (!send_ack)
 412			break;
 413
 414		nval = 0;
 415		/* try to reset ulp_delivered counter */
 416	} while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval);
 417
 418	if (send_ack)
 419		dlm_send_ack(node->nodeid, atomic_read(&node->seq_next));
 420}
 421
 422static int dlm_send_fin(struct midcomms_node *node,
 423			void (*ack_rcv)(struct midcomms_node *node))
 424{
 425	int mb_len = sizeof(struct dlm_header);
 426	struct dlm_header *m_header;
 427	struct dlm_mhandle *mh;
 428	char *ppc;
 429
 430	mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc);
 431	if (!mh)
 432		return -ENOMEM;
 433
 434	set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
 435	mh->ack_rcv = ack_rcv;
 436
 437	m_header = (struct dlm_header *)ppc;
 438
 439	m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
 440	m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
 441	m_header->h_length = cpu_to_le16(mb_len);
 442	m_header->h_cmd = DLM_FIN;
 443
 444	pr_debug("sending fin msg to node %d\n", node->nodeid);
 445	dlm_midcomms_commit_mhandle(mh, NULL, 0);
 446
 447	return 0;
 448}
 449
 450static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
 451{
 452	struct dlm_mhandle *mh;
 453
 454	rcu_read_lock();
 455	list_for_each_entry_rcu(mh, &node->send_queue, list) {
 456		if (before(mh->seq, seq)) {
 457			if (mh->ack_rcv)
 458				mh->ack_rcv(node);
 459		} else {
 460			/* send queue should be ordered */
 461			break;
 462		}
 463	}
 464
 465	spin_lock_bh(&node->send_queue_lock);
 466	list_for_each_entry_rcu(mh, &node->send_queue, list) {
 467		if (before(mh->seq, seq)) {
 468			dlm_mhandle_delete(node, mh);
 469		} else {
 470			/* send queue should be ordered */
 471			break;
 472		}
 473	}
 474	spin_unlock_bh(&node->send_queue_lock);
 475	rcu_read_unlock();
 476}
 477
 478static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
 479{
 480	spin_lock_bh(&node->state_lock);
 481	pr_debug("receive passive fin ack from node %d with state %s\n",
 482		 node->nodeid, dlm_state_str(node->state));
 483
 484	switch (node->state) {
 485	case DLM_LAST_ACK:
 486		/* DLM_CLOSED */
 487		midcomms_node_reset(node);
 488		break;
 489	case DLM_CLOSED:
 490		/* not valid but somehow we got what we want */
 491		wake_up(&node->shutdown_wait);
 492		break;
 493	default:
 494		spin_unlock_bh(&node->state_lock);
 495		log_print("%s: unexpected state: %d",
 496			  __func__, node->state);
 497		WARN_ON_ONCE(1);
 498		return;
 499	}
 500	spin_unlock_bh(&node->state_lock);
 501}
 502
 503static void dlm_receive_buffer_3_2_trace(uint32_t seq,
 504					 const union dlm_packet *p)
 505{
 506	switch (p->header.h_cmd) {
 507	case DLM_MSG:
 508		trace_dlm_recv_message(dlm_our_nodeid(), seq, &p->message);
 509		break;
 510	case DLM_RCOM:
 511		trace_dlm_recv_rcom(dlm_our_nodeid(), seq, &p->rcom);
 512		break;
 513	default:
 514		break;
 515	}
 516}
 517
 518static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
 519					struct midcomms_node *node,
 520					uint32_t seq)
 521{
 522	bool is_expected_seq;
 523	uint32_t oval, nval;
 524
 525	do {
 526		oval = atomic_read(&node->seq_next);
 527		is_expected_seq = (oval == seq);
 528		if (!is_expected_seq)
 529			break;
 530
 531		nval = oval + 1;
 532	} while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval);
 533
 534	if (is_expected_seq) {
 535		switch (p->header.h_cmd) {
 536		case DLM_FIN:
 537			spin_lock_bh(&node->state_lock);
 538			pr_debug("receive fin msg from node %d with state %s\n",
 539				 node->nodeid, dlm_state_str(node->state));
 540
 541			switch (node->state) {
 542			case DLM_ESTABLISHED:
 543				dlm_send_ack(node->nodeid, nval);
 544
 545				/* passive shutdown DLM_LAST_ACK case 1;
 546				 * additionally we check whether the node is
 547				 * used by cluster manager events at all.
 548				 */
 549				if (node->users == 0) {
 550					node->state = DLM_LAST_ACK;
 551					pr_debug("switch node %d to state %s case 1\n",
 552						 node->nodeid, dlm_state_str(node->state));
 553					set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
 554					dlm_send_fin(node, dlm_pas_fin_ack_rcv);
 555				} else {
 556					node->state = DLM_CLOSE_WAIT;
 557					pr_debug("switch node %d to state %s\n",
 558						 node->nodeid, dlm_state_str(node->state));
 559				}
 560				break;
 561			case DLM_FIN_WAIT1:
 562				dlm_send_ack(node->nodeid, nval);
 563				node->state = DLM_CLOSING;
 564				set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
 565				pr_debug("switch node %d to state %s\n",
 566					 node->nodeid, dlm_state_str(node->state));
 567				break;
 568			case DLM_FIN_WAIT2:
 569				dlm_send_ack(node->nodeid, nval);
 570				midcomms_node_reset(node);
 571				pr_debug("switch node %d to state %s\n",
 572					 node->nodeid, dlm_state_str(node->state));
 573				break;
 574			case DLM_LAST_ACK:
 575				/* probably remove_member caught it, do nothing */
 576				break;
 577			default:
 578				spin_unlock_bh(&node->state_lock);
 579				log_print("%s: unexpected state: %d",
 580					  __func__, node->state);
 581				WARN_ON_ONCE(1);
 582				return;
 583			}
 584			spin_unlock_bh(&node->state_lock);
 585			break;
 586		default:
 587			WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
 588			dlm_receive_buffer_3_2_trace(seq, p);
 589			dlm_receive_buffer(p, node->nodeid);
 590			atomic_inc(&node->ulp_delivered);
 591			/* unlikely case to send ack back when we don't transmit */
 592			dlm_send_ack_threshold(node, DLM_RECV_ACK_BACK_MSG_THRESHOLD);
 593			break;
 594		}
 595	} else {
 596		/* retry to ack message which we already have by sending back
 597		 * current node->seq_next number as ack.
 598		 */
 599		if (seq < oval)
 600			dlm_send_ack(node->nodeid, oval);
 601
 602		log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
 603				      seq, oval, node->nodeid);
 604	}
 605}
 606
 607static int dlm_opts_check_msglen(const union dlm_packet *p, uint16_t msglen,
 608				 int nodeid)
 609{
 610	int len = msglen;
 611
 612	/* we only trust outer header msglen because
 613	 * it's checked against receive buffer length.
 614	 */
 615	if (len < sizeof(struct dlm_opts))
 616		return -1;
 617	len -= sizeof(struct dlm_opts);
 618
 619	if (len < le16_to_cpu(p->opts.o_optlen))
 620		return -1;
 621	len -= le16_to_cpu(p->opts.o_optlen);
 622
 623	switch (p->opts.o_nextcmd) {
 624	case DLM_FIN:
 625		if (len < sizeof(struct dlm_header)) {
 626			log_print("fin too small: %d, will skip this message from node %d",
 627				  len, nodeid);
 628			return -1;
 629		}
 630
 631		break;
 632	case DLM_MSG:
 633		if (len < sizeof(struct dlm_message)) {
 634			log_print("msg too small: %d, will skip this message from node %d",
 635				  msglen, nodeid);
 636			return -1;
 637		}
 638
 639		break;
 640	case DLM_RCOM:
 641		if (len < sizeof(struct dlm_rcom)) {
 642			log_print("rcom msg too small: %d, will skip this message from node %d",
 643				  len, nodeid);
 644			return -1;
 645		}
 646
 647		break;
 648	default:
 649		log_print("unsupported o_nextcmd received: %u, will skip this message from node %d",
 650			  p->opts.o_nextcmd, nodeid);
 651		return -1;
 652	}
 653
 654	return 0;
 655}
 656
 657static void dlm_midcomms_receive_buffer_3_2(const union dlm_packet *p, int nodeid)
 658{
 659	uint16_t msglen = le16_to_cpu(p->header.h_length);
 660	struct midcomms_node *node;
 661	uint32_t seq;
 662	int ret, idx;
 663
 664	idx = srcu_read_lock(&nodes_srcu);
 665	node = nodeid2node(nodeid);
 666	if (WARN_ON_ONCE(!node))
 667		goto out;
 668
 669	switch (node->version) {
 670	case DLM_VERSION_NOT_SET:
 671		node->version = DLM_VERSION_3_2;
 672		wake_up(&node->shutdown_wait);
 673		log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
 674			  node->nodeid);
 675
 676		spin_lock(&node->state_lock);
 677		switch (node->state) {
 678		case DLM_CLOSED:
 679			node->state = DLM_ESTABLISHED;
 680			pr_debug("switch node %d to state %s\n",
 681				 node->nodeid, dlm_state_str(node->state));
 682			break;
 683		default:
 684			break;
 685		}
 686		spin_unlock(&node->state_lock);
 687
 688		break;
 689	case DLM_VERSION_3_2:
 690		break;
 691	default:
 692		log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
 693				      DLM_VERSION_3_2, node->nodeid, node->version);
 694		goto out;
 695	}
 696
 697	switch (p->header.h_cmd) {
 698	case DLM_RCOM:
 699		/* these rcom messages are used to determine the version.
 700		 * They have their own retransmission handling and
 701		 * are the first messages of dlm.
 702		 *
 703		 * length already checked.
 704		 */
 705		switch (p->rcom.rc_type) {
 706		case cpu_to_le32(DLM_RCOM_NAMES):
 707			fallthrough;
 708		case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
 709			fallthrough;
 710		case cpu_to_le32(DLM_RCOM_STATUS):
 711			fallthrough;
 712		case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
 713			break;
 714		default:
 715			log_print("unsupported rcom type received: %u, will skip this message from node %d",
 716				  le32_to_cpu(p->rcom.rc_type), nodeid);
 717			goto out;
 718		}
 719
 720		WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
 721		dlm_receive_buffer(p, nodeid);
 722		break;
 723	case DLM_OPTS:
 724		seq = le32_to_cpu(p->header.u.h_seq);
 725
 726		ret = dlm_opts_check_msglen(p, msglen, nodeid);
 727		if (ret < 0) {
 728			log_print("opts msg too small: %u, will skip this message from node %d",
 729				  msglen, nodeid);
 730			goto out;
 731		}
 732
 733		p = (union dlm_packet *)((unsigned char *)p->opts.o_opts +
 734					 le16_to_cpu(p->opts.o_optlen));
 735
 736		/* recheck the inner msglen just to make sure it's not garbage */
 737		msglen = le16_to_cpu(p->header.h_length);
 738		switch (p->header.h_cmd) {
 739		case DLM_RCOM:
 740			if (msglen < sizeof(struct dlm_rcom)) {
 741				log_print("inner rcom msg too small: %u, will skip this message from node %d",
 742					  msglen, nodeid);
 743				goto out;
 744			}
 745
 746			break;
 747		case DLM_MSG:
 748			if (msglen < sizeof(struct dlm_message)) {
 749				log_print("inner msg too small: %u, will skip this message from node %d",
 750					  msglen, nodeid);
 751				goto out;
 752			}
 753
 754			break;
 755		case DLM_FIN:
 756			if (msglen < sizeof(struct dlm_header)) {
 757				log_print("inner fin too small: %u, will skip this message from node %d",
 758					  msglen, nodeid);
 759				goto out;
 760			}
 761
 762			break;
 763		default:
 764			log_print("unsupported inner h_cmd received: %u, will skip this message from node %d",
 765				  msglen, nodeid);
 766			goto out;
 767		}
 768
 769		dlm_midcomms_receive_buffer(p, node, seq);
 770		break;
 771	case DLM_ACK:
 772		seq = le32_to_cpu(p->header.u.h_seq);
 773		dlm_receive_ack(node, seq);
 774		break;
 775	default:
 776		log_print("unsupported h_cmd received: %u, will skip this message from node %d",
 777			  p->header.h_cmd, nodeid);
 778		break;
 779	}
 780
 781out:
 782	srcu_read_unlock(&nodes_srcu, idx);
 783}
 784
 785static void dlm_midcomms_receive_buffer_3_1(const union dlm_packet *p, int nodeid)
 786{
 787	uint16_t msglen = le16_to_cpu(p->header.h_length);
 788	struct midcomms_node *node;
 789	int idx;
 790
 791	idx = srcu_read_lock(&nodes_srcu);
 792	node = nodeid2node(nodeid);
 793	if (WARN_ON_ONCE(!node)) {
 794		srcu_read_unlock(&nodes_srcu, idx);
 795		return;
 796	}
 797
 798	switch (node->version) {
 799	case DLM_VERSION_NOT_SET:
 800		node->version = DLM_VERSION_3_1;
 801		wake_up(&node->shutdown_wait);
 802		log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1,
 803			  node->nodeid);
 804		break;
 805	case DLM_VERSION_3_1:
 806		break;
 807	default:
 808		log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
 809				      DLM_VERSION_3_1, node->nodeid, node->version);
 810		srcu_read_unlock(&nodes_srcu, idx);
 811		return;
 812	}
 813	srcu_read_unlock(&nodes_srcu, idx);
 814
 815	switch (p->header.h_cmd) {
 816	case DLM_RCOM:
 817		/* length already checked */
 818		break;
 819	case DLM_MSG:
 820		if (msglen < sizeof(struct dlm_message)) {
 821			log_print("msg too small: %u, will skip this message from node %d",
 822				  msglen, nodeid);
 823			return;
 824		}
 825
 826		break;
 827	default:
 828		log_print("unsupported h_cmd received: %u, will skip this message from node %d",
 829			  p->header.h_cmd, nodeid);
 830		return;
 831	}
 832
 833	dlm_receive_buffer(p, nodeid);
 834}
 835
 836int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len)
 837{
 838	const unsigned char *ptr = buf;
 839	const struct dlm_header *hd;
 840	uint16_t msglen;
 841	int ret = 0;
 842
 843	while (len >= sizeof(struct dlm_header)) {
 844		hd = (struct dlm_header *)ptr;
 845
 846		/* no message should be larger than DLM_MAX_SOCKET_BUFSIZE or
 847		 * smaller than dlm_header size.
 848		 *
 849		 * Some messages do not have an 8 byte length boundary yet,
 850		 * which can result in unaligned memory access for some dlm
 851		 * messages. This problem needs to be fixed at the sending
 852		 * side; for now it seems nobody has run into architecture
 853		 * related issues yet, but it slows down some processing.
 854		 * Fixing this issue should be scheduled for the future by
 855		 * doing the next major version bump.
 856		 */
 857		msglen = le16_to_cpu(hd->h_length);
 858		if (msglen > DLM_MAX_SOCKET_BUFSIZE ||
 859		    msglen < sizeof(struct dlm_header)) {
 860			log_print("received invalid length header: %u from node %d, will abort message parsing",
 861				  msglen, nodeid);
 862			return -EBADMSG;
 863		}
 864
 865		/* the caller will take care that the leftover
 866		 * is parsed on the next call with more data
 867		 */
 868		if (msglen > len)
 869			break;
 870
 871		ret += msglen;
 872		len -= msglen;
 873		ptr += msglen;
 874	}
 875
 876	return ret;
 877}
 878
 879/*
 880 * Called from the low-level comms layer to process a buffer of
 881 * commands.
 882 */
 883int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
 884{
 885	const unsigned char *ptr = buf;
 886	const struct dlm_header *hd;
 887	uint16_t msglen;
 888	int ret = 0;
 889
 890	while (len >= sizeof(struct dlm_header)) {
 891		hd = (struct dlm_header *)ptr;
 892
 893		msglen = le16_to_cpu(hd->h_length);
 894		if (msglen > len)
 895			break;
 896
 897		switch (hd->h_version) {
 898		case cpu_to_le32(DLM_VERSION_3_1):
 899			dlm_midcomms_receive_buffer_3_1((const union dlm_packet *)ptr, nodeid);
 900			break;
 901		case cpu_to_le32(DLM_VERSION_3_2):
 902			dlm_midcomms_receive_buffer_3_2((const union dlm_packet *)ptr, nodeid);
 903			break;
 904		default:
 905			log_print("received invalid version header: %u from node %d, will skip this message",
 906				  le32_to_cpu(hd->h_version), nodeid);
 907			break;
 908		}
 909
 910		ret += msglen;
 911		len -= msglen;
 912		ptr += msglen;
 913	}
 914
 915	return ret;
 916}
 917
 918void dlm_midcomms_unack_msg_resend(int nodeid)
 919{
 920	struct midcomms_node *node;
 921	struct dlm_mhandle *mh;
 922	int idx, ret;
 923
 924	idx = srcu_read_lock(&nodes_srcu);
 925	node = nodeid2node(nodeid);
 926	if (WARN_ON_ONCE(!node)) {
 927		srcu_read_unlock(&nodes_srcu, idx);
 928		return;
 929	}
 930
 931	/* old protocol, we don't support retransmit on failure */
 932	switch (node->version) {
 933	case DLM_VERSION_3_2:
 934		break;
 935	default:
 936		srcu_read_unlock(&nodes_srcu, idx);
 937		return;
 938	}
 939
 940	rcu_read_lock();
 941	list_for_each_entry_rcu(mh, &node->send_queue, list) {
 942		if (!mh->committed)
 943			continue;
 944
 945		ret = dlm_lowcomms_resend_msg(mh->msg);
 946		if (!ret)
 947			log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d",
 948					      mh->seq, node->nodeid);
 949	}
 950	rcu_read_unlock();
 951	srcu_read_unlock(&nodes_srcu, idx);
 952}
 953
 954static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
 955				 uint32_t seq)
 956{
 957	opts->o_header.h_cmd = DLM_OPTS;
 958	opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
 959	opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
 960	opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
 961	opts->o_header.u.h_seq = cpu_to_le32(seq);
 962}
 963
 964static void midcomms_new_msg_cb(void *data)
 965{
 966	struct dlm_mhandle *mh = data;
 967
 968	atomic_inc(&mh->node->send_queue_cnt);
 969
 970	spin_lock_bh(&mh->node->send_queue_lock);
 971	list_add_tail_rcu(&mh->list, &mh->node->send_queue);
 972	spin_unlock_bh(&mh->node->send_queue_lock);
 973
 974	mh->seq = atomic_fetch_inc(&mh->node->seq_send);
 975}
 976
 977static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
 978						int len, char **ppc)
 979{
 980	struct dlm_opts *opts;
 981	struct dlm_msg *msg;
 982
 983	msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
 984				   ppc, midcomms_new_msg_cb, mh);
 985	if (!msg)
 986		return NULL;
 987
 988	opts = (struct dlm_opts *)*ppc;
 989	mh->opts = opts;
 990
 991	/* add possible options here */
 992	dlm_fill_opts_header(opts, len, mh->seq);
 993
 994	*ppc += sizeof(*opts);
 995	mh->inner_p = (const union dlm_packet *)*ppc;
 996	return msg;
 997}
 998
 999/* avoid false positive for nodes_srcu; the unlock happens in
1000 * dlm_midcomms_commit_mhandle, which must be called on success
1001 */
1002#ifndef __CHECKER__
1003struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc)
1004{
1005	struct midcomms_node *node;
1006	struct dlm_mhandle *mh;
1007	struct dlm_msg *msg;
1008	int idx;
1009
1010	idx = srcu_read_lock(&nodes_srcu);
1011	node = nodeid2node(nodeid);
1012	if (WARN_ON_ONCE(!node))
1013		goto err;
1014
1015	/* this is a bug; however we keep going and hope it will be resolved */
1016	WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
1017
1018	mh = dlm_allocate_mhandle();
1019	if (!mh)
1020		goto err;
1021
1022	mh->committed = false;
1023	mh->ack_rcv = NULL;
1024	mh->idx = idx;
1025	mh->node = node;
1026
1027	switch (node->version) {
1028	case DLM_VERSION_3_1:
1029		msg = dlm_lowcomms_new_msg(nodeid, len, ppc, NULL, NULL);
1030		if (!msg) {
1031			dlm_free_mhandle(mh);
1032			goto err;
1033		}
1034
1035		break;
1036	case DLM_VERSION_3_2:
1037		/* send ack back if necessary */
1038		dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
1039
1040		msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, ppc);
1041		if (!msg) {
1042			dlm_free_mhandle(mh);
1043			goto err;
1044		}
1045		break;
1046	default:
1047		dlm_free_mhandle(mh);
1048		WARN_ON_ONCE(1);
1049		goto err;
1050	}
1051
1052	mh->msg = msg;
1053
1054	/* keep in mind that it is a must to call
1055	 * dlm_midcomms_commit_mhandle(), which releases
1056	 * nodes_srcu using mh->idx; it is assumed
1057	 * here that the application will call it.
1058	 */
1059	return mh;
1060
1061err:
1062	srcu_read_unlock(&nodes_srcu, idx);
1063	return NULL;
1064}
1065#endif
1066
1067static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
1068					      const void *name, int namelen)
1069{
1070	switch (mh->inner_p->header.h_cmd) {
1071	case DLM_MSG:
1072		trace_dlm_send_message(mh->node->nodeid, mh->seq,
1073				       &mh->inner_p->message,
1074				       name, namelen);
1075		break;
1076	case DLM_RCOM:
1077		trace_dlm_send_rcom(mh->node->nodeid, mh->seq,
1078				    &mh->inner_p->rcom);
1079		break;
1080	default:
1081		/* nothing to trace */
1082		break;
1083	}
1084}
1085
1086static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
1087					const void *name, int namelen)
1088{
1089	/* nexthdr chain for fast lookup */
1090	mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
1091	mh->committed = true;
1092	dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
1093	dlm_lowcomms_commit_msg(mh->msg);
1094}
1095
1096/* avoid false positive for nodes_srcu; the lock was taken in
1097 * dlm_midcomms_get_mhandle
1098 */
1099#ifndef __CHECKER__
1100void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
1101				 const void *name, int namelen)
1102{
1103
1104	switch (mh->node->version) {
1105	case DLM_VERSION_3_1:
1106		srcu_read_unlock(&nodes_srcu, mh->idx);
1107
1108		dlm_lowcomms_commit_msg(mh->msg);
1109		dlm_lowcomms_put_msg(mh->msg);
1110		/* mh is not part of rcu list in this case */
1111		dlm_free_mhandle(mh);
1112		break;
1113	case DLM_VERSION_3_2:
1114	/* hold the rcu read lock here, because while we are
1115	 * sending the dlm message out we could receive
1116	 * an ack back which releases the mhandle, and we
1117	 * would get a use after free.
1118		 */
1119		rcu_read_lock();
1120		dlm_midcomms_commit_msg_3_2(mh, name, namelen);
1121		srcu_read_unlock(&nodes_srcu, mh->idx);
1122		rcu_read_unlock();
1123		break;
1124	default:
1125		srcu_read_unlock(&nodes_srcu, mh->idx);
1126		WARN_ON_ONCE(1);
1127		break;
1128	}
1129}
1130#endif
1131
1132int dlm_midcomms_start(void)
1133{
1134	return dlm_lowcomms_start();
1135}
1136
1137void dlm_midcomms_stop(void)
1138{
1139	dlm_lowcomms_stop();
1140}
1141
1142void dlm_midcomms_init(void)
1143{
1144	int i;
1145
1146	for (i = 0; i < CONN_HASH_SIZE; i++)
1147		INIT_HLIST_HEAD(&node_hash[i]);
1148
1149	dlm_lowcomms_init();
1150}
1151
1152static void midcomms_node_release(struct rcu_head *rcu)
1153{
1154	struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
1155
1156	WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
1157	dlm_send_queue_flush(node);
1158	kfree(node);
1159}
1160
1161void dlm_midcomms_exit(void)
1162{
1163	struct midcomms_node *node;
1164	int i, idx;
1165
1166	idx = srcu_read_lock(&nodes_srcu);
1167	for (i = 0; i < CONN_HASH_SIZE; i++) {
1168		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1169			dlm_delete_debug_comms_file(node->debugfs);
1170
1171			spin_lock(&nodes_lock);
1172			hlist_del_rcu(&node->hlist);
1173			spin_unlock(&nodes_lock);
1174
1175			call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
1176		}
1177	}
1178	srcu_read_unlock(&nodes_srcu, idx);
1179
1180	dlm_lowcomms_exit();
1181}
1182
1183static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
1184{
1185	spin_lock_bh(&node->state_lock);
1186	pr_debug("receive active fin ack from node %d with state %s\n",
1187		 node->nodeid, dlm_state_str(node->state));
1188
1189	switch (node->state) {
1190	case DLM_FIN_WAIT1:
1191		node->state = DLM_FIN_WAIT2;
1192		pr_debug("switch node %d to state %s\n",
1193			 node->nodeid, dlm_state_str(node->state));
1194		break;
1195	case DLM_CLOSING:
1196		midcomms_node_reset(node);
1197		pr_debug("switch node %d to state %s\n",
1198			 node->nodeid, dlm_state_str(node->state));
1199		break;
1200	case DLM_CLOSED:
1201		/* not valid but somehow we got what we want */
1202		wake_up(&node->shutdown_wait);
1203		break;
1204	default:
1205		spin_unlock_bh(&node->state_lock);
1206		log_print("%s: unexpected state: %d",
1207			  __func__, node->state);
1208		WARN_ON_ONCE(1);
1209		return;
1210	}
1211	spin_unlock_bh(&node->state_lock);
1212}
1213
1214void dlm_midcomms_add_member(int nodeid)
1215{
1216	struct midcomms_node *node;
1217	int idx;
1218
1219	idx = srcu_read_lock(&nodes_srcu);
1220	node = nodeid2node(nodeid);
1221	if (WARN_ON_ONCE(!node)) {
1222		srcu_read_unlock(&nodes_srcu, idx);
1223		return;
1224	}
1225
1226	spin_lock_bh(&node->state_lock);
1227	if (!node->users) {
1228		pr_debug("receive add member from node %d with state %s\n",
1229			 node->nodeid, dlm_state_str(node->state));
1230		switch (node->state) {
1231		case DLM_ESTABLISHED:
1232			break;
1233		case DLM_CLOSED:
1234			node->state = DLM_ESTABLISHED;
1235			pr_debug("switch node %d to state %s\n",
1236				 node->nodeid, dlm_state_str(node->state));
1237			break;
1238		default:
1239			/* some invalid state; the passive shutdown
1240			 * failed, so we try to reset and
1241			 * hope it will go on.
1242			 */
1243			log_print("reset node %d because shutdown stuck",
1244				  node->nodeid);
1245
1246			midcomms_node_reset(node);
1247			node->state = DLM_ESTABLISHED;
1248			break;
1249		}
1250	}
1251
1252	node->users++;
1253	pr_debug("node %d users inc count %d\n", nodeid, node->users);
1254	spin_unlock_bh(&node->state_lock);
1255
1256	srcu_read_unlock(&nodes_srcu, idx);
1257}
1258
1259void dlm_midcomms_remove_member(int nodeid)
1260{
1261	struct midcomms_node *node;
1262	int idx;
1263
1264	idx = srcu_read_lock(&nodes_srcu);
1265	node = nodeid2node(nodeid);
1266	/* in case dlm_midcomms_close() removed the node */
1267	if (!node) {
1268		srcu_read_unlock(&nodes_srcu, idx);
1269		return;
1270	}
1271
1272	spin_lock_bh(&node->state_lock);
1273	/* case where dlm_midcomms_addr() created the node but
1274	 * it was not added before because dlm_midcomms_close()
1275	 * removed the node
1276	 */
1277	if (!node->users) {
1278		spin_unlock_bh(&node->state_lock);
1279		srcu_read_unlock(&nodes_srcu, idx);
1280		return;
1281	}
1282
1283	node->users--;
1284	pr_debug("node %d users dec count %d\n", nodeid, node->users);
1285
1286	/* the users count hitting zero means the
1287	 * other side is running dlm_midcomms_stop();
1288	 * we meet each other to have a clean disconnect.
1289	 */
1290	if (node->users == 0) {
1291		pr_debug("receive remove member from node %d with state %s\n",
1292			 node->nodeid, dlm_state_str(node->state));
1293		switch (node->state) {
1294		case DLM_ESTABLISHED:
1295			break;
1296		case DLM_CLOSE_WAIT:
1297			/* passive shutdown DLM_LAST_ACK case 2 */
1298			node->state = DLM_LAST_ACK;
1299			pr_debug("switch node %d to state %s case 2\n",
1300				 node->nodeid, dlm_state_str(node->state));
1301			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
1302			dlm_send_fin(node, dlm_pas_fin_ack_rcv);
1303			break;
1304		case DLM_LAST_ACK:
1305			/* probably receive fin caught it, do nothing */
1306			break;
1307		case DLM_CLOSED:
1308			/* already gone, do nothing */
1309			break;
1310		default:
1311			log_print("%s: unexpected state: %d",
1312				  __func__, node->state);
1313			break;
1314		}
1315	}
1316	spin_unlock_bh(&node->state_lock);
1317
1318	srcu_read_unlock(&nodes_srcu, idx);
1319}
1320
1321void dlm_midcomms_version_wait(void)
1322{
1323	struct midcomms_node *node;
1324	int i, idx, ret;
1325
1326	idx = srcu_read_lock(&nodes_srcu);
1327	for (i = 0; i < CONN_HASH_SIZE; i++) {
1328		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1329			ret = wait_event_timeout(node->shutdown_wait,
1330						 node->version != DLM_VERSION_NOT_SET ||
1331						 node->state == DLM_CLOSED ||
1332						 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
1333						 DLM_SHUTDOWN_TIMEOUT);
1334			if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags))
1335				pr_debug("version wait timed out for node %d with state %s\n",
1336					 node->nodeid, dlm_state_str(node->state));
1337		}
1338	}
1339	srcu_read_unlock(&nodes_srcu, idx);
1340}
1341
1342static void midcomms_shutdown(struct midcomms_node *node)
1343{
1344	int ret;
1345
1346	/* old protocol, we don't wait for pending operations */
1347	switch (node->version) {
1348	case DLM_VERSION_3_2:
1349		break;
1350	default:
1351		return;
1352	}
1353
1354	spin_lock_bh(&node->state_lock);
1355	pr_debug("receive active shutdown for node %d with state %s\n",
1356		 node->nodeid, dlm_state_str(node->state));
1357	switch (node->state) {
1358	case DLM_ESTABLISHED:
1359		node->state = DLM_FIN_WAIT1;
1360		pr_debug("switch node %d to state %s case 2\n",
1361			 node->nodeid, dlm_state_str(node->state));
1362		dlm_send_fin(node, dlm_act_fin_ack_rcv);
1363		break;
1364	case DLM_CLOSED:
1365		/* we have what we want */
1366		break;
1367	default:
1368		/* too busy to enter DLM_FIN_WAIT1; wait in shutdown_wait
1369		 * until the passive side is done and we enter DLM_CLOSED.
1370		 */
1371		break;
1372	}
1373	spin_unlock_bh(&node->state_lock);
1374
1375	if (DLM_DEBUG_FENCE_TERMINATION)
1376		msleep(5000);
1377
1378	/* wait for other side dlm + fin */
1379	ret = wait_event_timeout(node->shutdown_wait,
1380				 node->state == DLM_CLOSED ||
1381				 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
1382				 DLM_SHUTDOWN_TIMEOUT);
1383	if (!ret)
1384		pr_debug("active shutdown timed out for node %d with state %s\n",
1385			 node->nodeid, dlm_state_str(node->state));
1386	else
1387		pr_debug("active shutdown done for node %d with state %s\n",
1388			 node->nodeid, dlm_state_str(node->state));
1389}
1390
1391void dlm_midcomms_shutdown(void)
1392{
1393	struct midcomms_node *node;
1394	int i, idx;
1395
1396	mutex_lock(&close_lock);
1397	idx = srcu_read_lock(&nodes_srcu);
1398	for (i = 0; i < CONN_HASH_SIZE; i++) {
1399		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1400			midcomms_shutdown(node);
1401		}
1402	}
1403
1404	dlm_lowcomms_shutdown();
1405
1406	for (i = 0; i < CONN_HASH_SIZE; i++) {
1407		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1408			midcomms_node_reset(node);
1409		}
1410	}
1411	srcu_read_unlock(&nodes_srcu, idx);
1412	mutex_unlock(&close_lock);
1413}
1414
1415int dlm_midcomms_close(int nodeid)
1416{
1417	struct midcomms_node *node;
1418	int idx, ret;
1419
1420	idx = srcu_read_lock(&nodes_srcu);
1421	/* Abort pending close/remove operation */
1422	node = nodeid2node(nodeid);
1423	if (node) {
1424		/* let shutdown waiters leave */
1425		set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
1426		wake_up(&node->shutdown_wait);
1427	}
1428	srcu_read_unlock(&nodes_srcu, idx);
1429
1430	synchronize_srcu(&nodes_srcu);
1431
1432	mutex_lock(&close_lock);
1433	idx = srcu_read_lock(&nodes_srcu);
1434	node = nodeid2node(nodeid);
1435	if (!node) {
1436		srcu_read_unlock(&nodes_srcu, idx);
1437		mutex_unlock(&close_lock);
1438		return dlm_lowcomms_close(nodeid);
1439	}
1440
1441	ret = dlm_lowcomms_close(nodeid);
1442	dlm_delete_debug_comms_file(node->debugfs);
1443
1444	spin_lock_bh(&nodes_lock);
1445	hlist_del_rcu(&node->hlist);
1446	spin_unlock_bh(&nodes_lock);
1447	srcu_read_unlock(&nodes_srcu, idx);
1448
1449	/* wait until all readers have left before flushing the send queue */
1450	synchronize_srcu(&nodes_srcu);
1451
1452	/* drop all pending dlm messages; this is fine as
1453	 * this function gets called when the node is fenced
1454	 */
1455	dlm_send_queue_flush(node);
1456
1457	call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
1458	mutex_unlock(&close_lock);
1459
1460	return ret;
1461}
1462
1463/* debug functionality to send raw dlm msg from user space */
1464struct dlm_rawmsg_data {
1465	struct midcomms_node *node;
1466	void *buf;
1467};
1468
1469static void midcomms_new_rawmsg_cb(void *data)
1470{
1471	struct dlm_rawmsg_data *rd = data;
1472	struct dlm_header *h = rd->buf;
1473
1474	switch (h->h_version) {
1475	case cpu_to_le32(DLM_VERSION_3_1):
1476		break;
1477	default:
1478		switch (h->h_cmd) {
1479		case DLM_OPTS:
1480			if (!h->u.h_seq)
1481				h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send));
1482			break;
1483		default:
1484			break;
1485		}
1486		break;
1487	}
1488}
1489
1490int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
1491			     int buflen)
1492{
1493	struct dlm_rawmsg_data rd;
1494	struct dlm_msg *msg;
1495	char *msgbuf;
1496
1497	rd.node = node;
1498	rd.buf = buf;
1499
1500	msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf,
1501				   midcomms_new_rawmsg_cb, &rd);
1502	if (!msg)
1503		return -ENOMEM;
1504
1505	memcpy(msgbuf, buf, buflen);
1506	dlm_lowcomms_commit_msg(msg);
1507	return 0;
1508}
1509
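As the v6.13.7 header comment explains, every DLM_OPTS-wrapped message carries a sequence number and stays on the per-node send_queue until the peer acknowledges it; dlm_receive_ack() walks the ordered queue and releases every entry whose sequence number lies before the acknowledged one, using the wrap-safe before() comparison pulled in via <net/tcp.h>. The following stand-alone sketch shows that pruning rule in plain userspace C; the struct and function names here are invented for illustration and are not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a comes before b" check, same idea as before() in <net/tcp.h> */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

struct sent_msg {
	uint32_t seq;
	int acked;
};

/* Mark every queued message whose sequence number precedes ack_seq as
 * acknowledged; the queue is ordered, so stop at the first non-match.
 */
static void receive_ack(struct sent_msg *queue, int n, uint32_t ack_seq)
{
	for (int i = 0; i < n; i++) {
		if (!seq_before(queue[i].seq, ack_seq))
			break;
		queue[i].acked = 1;
	}
}

int main(void)
{
	struct sent_msg q[] = { { 1, 0 }, { 2, 0 }, { 3, 0 } };

	receive_ack(q, 3, 3);	/* the peer acknowledged everything before seq 3 */
	for (int i = 0; i < 3; i++)
		printf("seq %u acked=%d\n", q[i].seq, q[i].acked);
	return 0;
}

Serial-number arithmetic keeps the comparison correct across a uint32_t wrap, which is why the kernel reuses TCP's before() rather than a plain less-than.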