// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004, 2005 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>

#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"

/* for now we operate under the assertion that there can be only one
 * cluster active at a time. Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

static const char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
	"reset",	/* O2NM_FENCE_RESET */
	"panic",	/* O2NM_FENCE_PANIC */
};

static inline void o2nm_lock_subsystem(void);
static inline void o2nm_unlock_subsystem(void);

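/*
 * Return the node configured with the given node number, or NULL if no
 * such node exists (or no cluster is configured).  A reference is taken
 * on the returned node's config_item; the caller must drop it with
 * o2nm_node_put().
 */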
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
	struct o2nm_node *node = NULL;

	if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
		goto out;

	read_lock(&o2nm_single_cluster->cl_nodes_lock);
	node = o2nm_single_cluster->cl_nodes[node_num];
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);

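/*
 * Copy the bitmap of currently configured node numbers into @map.  The
 * caller must supply a buffer at least as large as cl_nodes_bitmap;
 * smaller buffers trip the BUG_ON() below.
 */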
int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

	if (cluster == NULL)
		return -EINVAL;

	read_lock(&cluster->cl_nodes_lock);
	bitmap_copy(map, cluster->cl_nodes_bitmap, O2NM_MAX_NODES);
	read_unlock(&cluster->cl_nodes_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);

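/*
 * Walk the cluster's rbtree of nodes keyed by IPv4 address.  Returns the
 * matching node, or NULL if none is found.  When @ret_p and @ret_parent
 * are non-NULL they are filled in with the link and parent under which a
 * node with @ip_needle would be inserted, for use with rb_link_node().
 */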
static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
						  __be32 ip_needle,
						  struct rb_node ***ret_p,
						  struct rb_node **ret_parent)
{
	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
	struct rb_node *parent = NULL;
	struct o2nm_node *node, *ret = NULL;

	while (*p) {
		int cmp;

		parent = *p;
		node = rb_entry(parent, struct o2nm_node, nd_ip_node);

		cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
			     sizeof(ip_needle));
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else {
			ret = node;
			break;
		}
	}

	if (ret_p != NULL)
		*ret_p = p;
	if (ret_parent != NULL)
		*ret_parent = parent;

	return ret;
}

struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
	struct o2nm_node *node = NULL;
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	if (cluster == NULL)
		goto out;

	read_lock(&cluster->cl_nodes_lock);
	node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&cluster->cl_nodes_lock);

out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);

void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);

u8 o2nm_this_node(void)
{
	u8 node_num = O2NM_MAX_NODES;

	if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
		node_num = o2nm_single_cluster->cl_local_node;

	return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);

/* node configfs bits */

static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
	return item ?
		container_of(to_config_group(item), struct o2nm_cluster,
			     cl_group)
		: NULL;
}

static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
	return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}

static void o2nm_node_release(struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	kfree(node);
}

static ssize_t o2nm_node_num_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num);
}

static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
	/* through the first node_set .parent
	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
	if (node->nd_item.ci_parent)
		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
	else
		return NULL;
}

enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
};

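/*
 * Writing the "num" attribute publishes the node: it is entered into
 * cl_nodes[] and cl_nodes_bitmap, after which networking can look it up
 * by number.  The address and port attributes must already be set, and
 * a node number can only be assigned once.
 */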
static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	int ret = 0;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		o2nm_unlock_subsystem();
		return -EINVAL;
	}

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		ret = -EEXIST;
	else if (test_and_set_bit(O2NM_NODE_ATTR_NUM,
				  &node->nd_set_attributes))
		ret = -EBUSY;
	else {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	o2nm_unlock_subsystem();

	if (ret)
		return ret;

	return count;
}
static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port));
}

static ssize_t o2nm_node_ipv4_port_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u16)-1)
		return -ERANGE;

	if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EBUSY;
	node->nd_ipv4_port = htons(tmp);

	return count;
}

static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address);
}

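/*
 * Parse a dotted-quad IPv4 address and insert the node into the
 * cluster's address rbtree.  Duplicate addresses are rejected with
 * -EEXIST, and the attribute may only be written once.
 */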
static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
					    const char *page,
					    size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		o2nm_unlock_subsystem();
		return -EINVAL;
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS,
				  &node->nd_set_attributes))
		ret = -EBUSY;
	else {
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	o2nm_unlock_subsystem();

	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}

static ssize_t o2nm_node_local_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local);
}

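/*
 * Writing 1 to "local" marks this node as the local node and starts the
 * o2net listening socket; writing 0 clears it and stops listening.  The
 * num, address and port attributes must all be set first, and only one
 * node per cluster may be local at a time.
 */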
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		ret = -EINVAL;
		goto out;
	}

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num) {
		ret = -EBUSY;
		goto out;
	}

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			goto out;
	}

	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	ret = count;

out:
	o2nm_unlock_subsystem();
	return ret;
}

CONFIGFS_ATTR(o2nm_node_, num);
CONFIGFS_ATTR(o2nm_node_, ipv4_port);
CONFIGFS_ATTR(o2nm_node_, ipv4_address);
CONFIGFS_ATTR(o2nm_node_, local);

static struct configfs_attribute *o2nm_node_attrs[] = {
	&o2nm_node_attr_num,
	&o2nm_node_attr_ipv4_port,
	&o2nm_node_attr_ipv4_address,
	&o2nm_node_attr_local,
	NULL,
};

static struct configfs_item_operations o2nm_node_item_ops = {
	.release = o2nm_node_release,
};

static const struct config_item_type o2nm_node_type = {
	.ct_item_ops = &o2nm_node_item_ops,
	.ct_attrs = o2nm_node_attrs,
	.ct_owner = THIS_MODULE,
};

/* node set */

struct o2nm_node_group {
	struct config_group ns_group;
	/* some stuff? */
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif

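/*
 * Shared helper for the cluster timing attributes: parse an unsigned
 * integer from @page and store it in @val.  Zero and values that do not
 * fit below (u32)-1 are rejected.
 */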
static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
				       unsigned int *val)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u32)-1)
		return -ERANGE;

	*val = tmp;

	return count;
}

static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item,
						 char *page)
{
	return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms);
}

static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item,
						  const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_idle_timeout_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change idle timeout after "
			     "the first peer has agreed to it."
			     " %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val <= cluster->cl_keepalive_delay_ms) {
			mlog(ML_NOTICE, "o2net: idle timeout must be larger "
			     "than keepalive delay\n");
			ret = -EINVAL;
		} else {
			cluster->cl_idle_timeout_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_keepalive_delay_ms_show(
	struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
		       to_o2nm_cluster(item)->cl_keepalive_delay_ms);
}

static ssize_t o2nm_cluster_keepalive_delay_ms_store(
	struct config_item *item, const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_keepalive_delay_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change keepalive delay after"
			     " the first peer has agreed to it."
			     " %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val >= cluster->cl_idle_timeout_ms) {
			mlog(ML_NOTICE, "o2net: keepalive delay must be "
			     "smaller than idle timeout\n");
			ret = -EINVAL;
		} else {
			cluster->cl_keepalive_delay_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_reconnect_delay_ms_show(
	struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
		       to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_reconnect_delay_ms_store(
	struct config_item *item, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
			&to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_fence_method_show(
	struct config_item *item, char *page)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret = 0;

	if (cluster)
		ret = sprintf(page, "%s\n",
			      o2nm_fence_method_desc[cluster->cl_fence_method]);
	return ret;
}

static ssize_t o2nm_cluster_fence_method_store(
	struct config_item *item, const char *page, size_t count)
{
	unsigned int i;

	if (page[count - 1] != '\n')
		goto bail;

	for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
		if (count != strlen(o2nm_fence_method_desc[i]) + 1)
			continue;
		if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
			continue;
		if (to_o2nm_cluster(item)->cl_fence_method != i) {
			printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
			       o2nm_fence_method_desc[i]);
			to_o2nm_cluster(item)->cl_fence_method = i;
		}
		return count;
	}

bail:
	return -EINVAL;
}

CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms);
CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, fence_method);

static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms,
	&o2nm_cluster_attr_keepalive_delay_ms,
	&o2nm_cluster_attr_reconnect_delay_ms,
	&o2nm_cluster_attr_fence_method,
	NULL,
};

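/*
 * configfs ->make_item(): called when a directory is created under the
 * cluster's "node" group.  Allocates an o2nm_node named after the new
 * directory; its attributes are filled in by subsequent writes to num,
 * ipv4_address, ipv4_port and local.
 */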
static struct config_item *o2nm_node_group_make_item(struct config_group *group,
						     const char *name)
{
	struct o2nm_node *node = NULL;

	if (strlen(name) > O2NM_MAX_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
	if (node == NULL)
		return ERR_PTR(-ENOMEM);

	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
	config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
	spin_lock_init(&node->nd_lock);

	mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name);

	return &node->nd_item;
}

static void o2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

	if (cluster->cl_nodes[node->nd_num] == node) {
		o2net_disconnect_node(node);

		if (cluster->cl_has_local &&
		    (cluster->cl_local_node == node->nd_num)) {
			cluster->cl_has_local = 0;
			cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
			o2net_stop_listening(node);
		}
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	/* XXX sloppy */
	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n",
	     config_item_name(&node->nd_item));

	config_item_put(item);
}

static struct configfs_group_operations o2nm_node_group_group_ops = {
	.make_item = o2nm_node_group_make_item,
	.drop_item = o2nm_node_group_drop_item,
};

static const struct config_item_type o2nm_node_group_type = {
	.ct_group_ops = &o2nm_node_group_group_ops,
	.ct_owner = THIS_MODULE,
};

/* cluster */

static void o2nm_cluster_release(struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	kfree(cluster);
}

static struct configfs_item_operations o2nm_cluster_item_ops = {
	.release = o2nm_cluster_release,
};

static const struct config_item_type o2nm_cluster_type = {
	.ct_item_ops = &o2nm_cluster_item_ops,
	.ct_attrs = o2nm_cluster_attrs,
	.ct_owner = THIS_MODULE,
};

/* cluster set */

struct o2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
	/* some stuff? */
};

#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
		: NULL;
}
#endif

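/*
 * configfs ->make_group(): called when the cluster directory itself is
 * created.  Builds the o2nm_cluster along with its default "node" and
 * heartbeat groups and installs it as the single active cluster.
 */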
static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
							  const char *name)
{
	struct o2nm_cluster *cluster = NULL;
	struct o2nm_node_group *ns = NULL;
	struct config_group *o2hb_group = NULL, *ret = NULL;

	/* this runs under the parent dir's i_rwsem; there can be only
	 * one caller in here at a time */
	if (o2nm_single_cluster)
		return ERR_PTR(-ENOSPC);

	cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
	o2hb_group = o2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || o2hb_group == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &o2nm_cluster_type);
	configfs_add_default_group(&ns->ns_group, &cluster->cl_group);

	config_group_init_type_name(&ns->ns_group, "node",
				    &o2nm_node_group_type);
	configfs_add_default_group(o2hb_group, &cluster->cl_group);

	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
	cluster->cl_fence_method = O2NM_FENCE_RESET;

	ret = &cluster->cl_group;
	o2nm_single_cluster = cluster;

out:
	if (ret == NULL) {
		kfree(cluster);
		kfree(ns);
		o2hb_free_hb_set(o2hb_group);
		ret = ERR_PTR(-ENOMEM);
	}

	return ret;
}

static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	BUG_ON(o2nm_single_cluster != cluster);
	o2nm_single_cluster = NULL;

	configfs_remove_default_groups(&cluster->cl_group);
	config_item_put(item);
}

static struct configfs_group_operations o2nm_cluster_group_group_ops = {
	.make_group = o2nm_cluster_group_make_group,
	.drop_item = o2nm_cluster_group_drop_item,
};

static const struct config_item_type o2nm_cluster_group_type = {
	.ct_group_ops = &o2nm_cluster_group_group_ops,
	.ct_owner = THIS_MODULE,
};

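/*
 * The subsystem below shows up as the "cluster" directory of configfs.
 * As a rough usage sketch (assuming configfs is mounted at the usual
 * /sys/kernel/config, and with "mycluster"/"node1" as example names),
 * a cluster is assembled from userspace roughly like this:
 *
 *	mkdir /sys/kernel/config/cluster/mycluster
 *	mkdir /sys/kernel/config/cluster/mycluster/node/node1
 *	echo 192.168.0.1 > .../node/node1/ipv4_address
 *	echo 7777 > .../node/node1/ipv4_port
 *	echo 1 > .../node/node1/num
 *	echo 1 > .../node/node1/local	(only on node1 itself)
 *
 * In practice these writes are typically issued by the o2cb/ocfs2-tools
 * userspace rather than by hand.
 */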
static struct o2nm_cluster_group o2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &o2nm_cluster_group_type,
			},
		},
	},
};

static inline void o2nm_lock_subsystem(void)
{
	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

static inline void o2nm_unlock_subsystem(void)
{
	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

int o2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}

void o2nm_undepend_item(struct config_item *item)
{
	configfs_undepend_item(item);
}

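/*
 * Pin the local node's configfs item so that it cannot be removed while
 * the cluster stack depends on it; o2nm_undepend_this_node() drops the
 * dependency again.
 */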
int o2nm_depend_this_node(void)
{
	int ret = 0;
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	if (!local_node) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_depend_item(&local_node->nd_item);
	o2nm_node_put(local_node);

out:
	return ret;
}

void o2nm_undepend_this_node(void)
{
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	BUG_ON(!local_node);

	o2nm_undepend_item(&local_node->nd_item);
	o2nm_node_put(local_node);
}


static void __exit exit_o2nm(void)
{
	/* XXX sync with hb callbacks and shut down hb? */
	o2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
	o2cb_sys_shutdown();

	o2net_exit();
	o2hb_exit();
}

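/*
 * Module init: bring up heartbeat and o2net, register the heartbeat
 * callbacks, then expose the "cluster" configfs subsystem and the o2cb
 * sys interface (o2cb_sys_init()).  exit_o2nm() above undoes these steps.
 */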
static int __init init_o2nm(void)
{
	int ret;

	o2hb_init();

	ret = o2net_init();
	if (ret)
		goto out_o2hb;

	ret = o2net_register_hb_callbacks();
	if (ret)
		goto out_o2net;

	config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
	mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
	ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
	if (ret) {
		printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
		goto out_callbacks;
	}

	ret = o2cb_sys_init();
	if (!ret)
		goto out;

	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
	o2net_unregister_hb_callbacks();
out_o2net:
	o2net_exit();
out_o2hb:
	o2hb_exit();
out:
	return ret;
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 cluster management");

module_init(init_o2nm)
module_exit(exit_o2nm)