xref: /openbmc/linux/fs/ocfs2/cluster/nodemanager.c (revision 887069f4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/slab.h>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/configfs.h>
10 
11 #include "tcp.h"
12 #include "nodemanager.h"
13 #include "heartbeat.h"
14 #include "masklog.h"
15 #include "sys.h"
16 
17 /* for now we operate under the assertion that there can be only one
18  * cluster active at a time.  Changing this will require trickling
19  * cluster references throughout where nodes are looked up */
/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

/* Human-readable names for the fence methods, indexed by the
 * o2nm_fence_method enum; exposed via the "fence_method" attribute. */
static const char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
	"reset",	/* O2NM_FENCE_RESET */
	"panic",	/* O2NM_FENCE_PANIC */
};

/* Forward declarations: these wrap the configfs subsystem mutex and are
 * defined after the subsystem object near the bottom of this file. */
static inline void o2nm_lock_subsystem(void);
static inline void o2nm_unlock_subsystem(void);
29 
30 struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
31 {
32 	struct o2nm_node *node = NULL;
33 
34 	if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
35 		goto out;
36 
37 	read_lock(&o2nm_single_cluster->cl_nodes_lock);
38 	node = o2nm_single_cluster->cl_nodes[node_num];
39 	if (node)
40 		config_item_get(&node->nd_item);
41 	read_unlock(&o2nm_single_cluster->cl_nodes_lock);
42 out:
43 	return node;
44 }
45 EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);
46 
/*
 * Copy the bitmap of configured node numbers into 'map'.  'bytes' must
 * be at least sizeof(cl_nodes_bitmap); the BUG_ON only evaluates a
 * sizeof() so it is safe to run before the NULL check.  Returns
 * -EINVAL when no cluster has been configured yet.
 */
int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

	if (cluster == NULL)
		return -EINVAL;

	read_lock(&cluster->cl_nodes_lock);
	memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
	read_unlock(&cluster->cl_nodes_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
63 
/*
 * Search the cluster's rb-tree of nodes keyed by IPv4 address.
 *
 * Returns the matching node, or NULL if no node has that address.  When
 * ret_p/ret_parent are non-NULL they receive the link slot and parent
 * needed by rb_link_node() at the point where the search ended, so a
 * failed lookup doubles as the insertion-position computation (see
 * o2nm_node_ipv4_address_store).  Caller must hold cl_nodes_lock.
 */
static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
						  __be32 ip_needle,
						  struct rb_node ***ret_p,
						  struct rb_node **ret_parent)
{
	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
	struct rb_node *parent = NULL;
	struct o2nm_node *node, *ret = NULL;

	while (*p) {
		int cmp;

		parent = *p;
		node = rb_entry(parent, struct o2nm_node, nd_ip_node);

		/* both sides are __be32, so memcmp gives a consistent
		 * (if not host-numeric) total order */
		cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
				sizeof(ip_needle));
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else {
			ret = node;
			break;
		}
	}

	if (ret_p != NULL)
		*ret_p = p;
	if (ret_parent != NULL)
		*ret_parent = parent;

	return ret;
}
98 
99 struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
100 {
101 	struct o2nm_node *node = NULL;
102 	struct o2nm_cluster *cluster = o2nm_single_cluster;
103 
104 	if (cluster == NULL)
105 		goto out;
106 
107 	read_lock(&cluster->cl_nodes_lock);
108 	node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
109 	if (node)
110 		config_item_get(&node->nd_item);
111 	read_unlock(&cluster->cl_nodes_lock);
112 
113 out:
114 	return node;
115 }
116 EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);
117 
/* Drop a node reference taken by o2nm_get_node_by_num/ip or
 * o2nm_node_get. */
void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

/* Take an extra reference on a node's configfs item. */
void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);
129 
130 u8 o2nm_this_node(void)
131 {
132 	u8 node_num = O2NM_MAX_NODES;
133 
134 	if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
135 		node_num = o2nm_single_cluster->cl_local_node;
136 
137 	return node_num;
138 }
139 EXPORT_SYMBOL_GPL(o2nm_this_node);
140 
141 /* node configfs bits */
142 
/* Map a configfs item back to its enclosing cluster; NULL-safe. */
static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
	return item ?
		container_of(to_config_group(item), struct o2nm_cluster,
			     cl_group)
		: NULL;
}

/* Map a configfs item back to its enclosing node; NULL-safe. */
static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
	return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}
155 
/* configfs release: frees the node allocated in
 * o2nm_node_group_make_item() once the last reference is dropped. */
static void o2nm_node_release(struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	kfree(node);
}

/* "num" attribute read: the node's configured cluster node number. */
static ssize_t o2nm_node_num_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num);
}
166 
/* Walk up from a node to its cluster via the configfs parent chain;
 * returns NULL if the node has been unlinked.  Callers hold the
 * subsystem mutex so ci_parent stays stable. */
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
	/* through the first node_set .parent
	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
	if (node->nd_item.ci_parent)
		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
	else
		return NULL;
}

/* Bit indices into o2nm_node.nd_set_attributes, tracking which
 * write-once node attributes have been stored. */
enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
};
182 
/*
 * "num" attribute write: assign this node its cluster-wide node number
 * and publish it in cl_nodes[].  The address and port attributes must
 * already be set, the attribute is write-once (-EBUSY on a second
 * write), and the slot must be free (-EEXIST otherwise).
 */
static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	int ret = 0;

	tmp = simple_strtoul(p, &p, 0);
	/* reject trailing garbage; a single trailing '\n' is allowed */
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	/* the subsystem mutex keeps ci_parent (and thus the cluster)
	 * stable while we publish the node */
	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		o2nm_unlock_subsystem();
		return -EINVAL;
	}

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		ret = -EEXIST;
	else if (test_and_set_bit(O2NM_NODE_ATTR_NUM,
			&node->nd_set_attributes))
		ret = -EBUSY;
	else  {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	o2nm_unlock_subsystem();

	if (ret)
		return ret;

	return count;
}
/* "ipv4_port" read: the port is stored big-endian, shown in host order. */
static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port));
}

/*
 * "ipv4_port" write: write-once (-EBUSY on a second write).  Zero is
 * rejected; NOTE(review): the range check also rejects 65535 ((u16)-1)
 * -- presumably deliberate, confirm before relaxing it.
 */
static ssize_t o2nm_node_ipv4_port_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u16)-1)
		return -ERANGE;

	if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EBUSY;
	node->nd_ipv4_port = htons(tmp);

	return count;
}
260 
/* "ipv4_address" read: dotted-quad form of the stored __be32. */
static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address);
}

/*
 * "ipv4_address" write: parse a dotted quad, build the big-endian
 * address, and insert the node into the cluster's address rb-tree.
 * Write-once (-EBUSY); the address must be unique in the cluster
 * (-EEXIST).
 */
static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
					    const char *page,
					    size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	/* the first dotted octet lands in octets[3] so the (i * 8) shift
	 * below puts it in the most significant byte of the address */
	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		o2nm_unlock_subsystem();
		return -EINVAL;
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS,
			&node->nd_set_attributes))
		ret = -EBUSY;
	else {
		/* link at the slot the failed lookup computed */
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	o2nm_unlock_subsystem();

	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}
316 
/* "local" attribute read: 1 if this is the node the kernel runs on. */
static ssize_t o2nm_node_local_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local);
}

/*
 * "local" attribute write: mark (or unmark) this node as the one the
 * running kernel is on.  Setting it starts the o2net listener; clearing
 * it on the current local node stops it.  num/address/port must be set
 * first; -EBUSY if a different node is already local.
 */
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		ret = -EINVAL;
		goto out;
	}

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num) {
		ret = -EBUSY;
		goto out;
	}

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			goto out;
	}

	/* clearing local on the current local node shuts rx back down */
	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	ret = count;

out:
	o2nm_unlock_subsystem();
	return ret;
}
384 
/* Generate the configfs attribute objects (o2nm_node_attr_num etc.)
 * from the show/store pairs above. */
CONFIGFS_ATTR(o2nm_node_, num);
CONFIGFS_ATTR(o2nm_node_, ipv4_port);
CONFIGFS_ATTR(o2nm_node_, ipv4_address);
CONFIGFS_ATTR(o2nm_node_, local);

/* Attributes exposed in each node's configfs directory. */
static struct configfs_attribute *o2nm_node_attrs[] = {
	&o2nm_node_attr_num,
	&o2nm_node_attr_ipv4_port,
	&o2nm_node_attr_ipv4_address,
	&o2nm_node_attr_local,
	NULL,
};

static struct configfs_item_operations o2nm_node_item_ops = {
	.release		= o2nm_node_release,
};

/* Item type of a node directory under <cluster>/node/. */
static const struct config_item_type o2nm_node_type = {
	.ct_item_ops	= &o2nm_node_item_ops,
	.ct_attrs	= o2nm_node_attrs,
	.ct_owner	= THIS_MODULE,
};
407 
408 /* node set */
409 
/* Wrapper for the per-cluster "node" default group. */
struct o2nm_node_group {
	struct config_group ns_group;
	/* some stuff? */
};

#if 0
/* Currently unused; kept for symmetry with the other container_of
 * helpers. */
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif
423 
424 static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
425                                        unsigned int *val)
426 {
427 	unsigned long tmp;
428 	char *p = (char *)page;
429 
430 	tmp = simple_strtoul(p, &p, 0);
431 	if (!p || (*p && (*p != '\n')))
432 		return -EINVAL;
433 
434 	if (tmp == 0)
435 		return -EINVAL;
436 	if (tmp >= (u32)-1)
437 		return -ERANGE;
438 
439 	*val = tmp;
440 
441 	return count;
442 }
443 
/* "idle_timeout_ms" read. */
static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item,
	char *page)
{
	return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms);
}

/*
 * "idle_timeout_ms" write: refused while any peers are connected (the
 * timeout is agreed at handshake time) and must stay strictly larger
 * than the keepalive delay.
 */
static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item,
	const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret;
	unsigned int val;

	ret =  o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_idle_timeout_ms != val
			&& o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change idle timeout after "
			     "the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val <= cluster->cl_keepalive_delay_ms) {
			mlog(ML_NOTICE, "o2net: idle timeout must be larger "
			     "than keepalive delay\n");
			ret = -EINVAL;
		} else {
			cluster->cl_idle_timeout_ms = val;
		}
	}

	return ret;
}
479 
/* "keepalive_delay_ms" read. */
static ssize_t o2nm_cluster_keepalive_delay_ms_show(
	struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
			to_o2nm_cluster(item)->cl_keepalive_delay_ms);
}

/*
 * "keepalive_delay_ms" write: refused while any peers are connected
 * (mirrors idle_timeout_ms) and must stay strictly smaller than the
 * idle timeout.
 */
static ssize_t o2nm_cluster_keepalive_delay_ms_store(
	struct config_item *item, const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret;
	unsigned int val;

	ret =  o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_keepalive_delay_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change keepalive delay after"
			     " the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val >= cluster->cl_idle_timeout_ms) {
			mlog(ML_NOTICE, "o2net: keepalive delay must be "
			     "smaller than idle timeout\n");
			ret = -EINVAL;
		} else {
			cluster->cl_keepalive_delay_ms = val;
		}
	}

	return ret;
}
516 
/* "reconnect_delay_ms" read. */
static ssize_t o2nm_cluster_reconnect_delay_ms_show(
	struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
			to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}

/* "reconnect_delay_ms" write: any value o2nm_cluster_attr_write
 * accepts (nonzero, below (u32)-1) is stored directly. */
static ssize_t o2nm_cluster_reconnect_delay_ms_store(
	struct config_item *item, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
                               &to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}
530 
531 static ssize_t o2nm_cluster_fence_method_show(
532 	struct config_item *item, char *page)
533 {
534 	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
535 	ssize_t ret = 0;
536 
537 	if (cluster)
538 		ret = sprintf(page, "%s\n",
539 			      o2nm_fence_method_desc[cluster->cl_fence_method]);
540 	return ret;
541 }
542 
543 static ssize_t o2nm_cluster_fence_method_store(
544 	struct config_item *item, const char *page, size_t count)
545 {
546 	unsigned int i;
547 
548 	if (page[count - 1] != '\n')
549 		goto bail;
550 
551 	for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
552 		if (count != strlen(o2nm_fence_method_desc[i]) + 1)
553 			continue;
554 		if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
555 			continue;
556 		if (to_o2nm_cluster(item)->cl_fence_method != i) {
557 			printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
558 			       o2nm_fence_method_desc[i]);
559 			to_o2nm_cluster(item)->cl_fence_method = i;
560 		}
561 		return count;
562 	}
563 
564 bail:
565 	return -EINVAL;
566 }
567 
/* Generate the cluster configfs attribute objects from the show/store
 * pairs above. */
CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms);
CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, fence_method);

/* Attributes exposed in the cluster's configfs directory. */
static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms,
	&o2nm_cluster_attr_keepalive_delay_ms,
	&o2nm_cluster_attr_reconnect_delay_ms,
	&o2nm_cluster_attr_fence_method,
	NULL,
};
580 
581 static struct config_item *o2nm_node_group_make_item(struct config_group *group,
582 						     const char *name)
583 {
584 	struct o2nm_node *node = NULL;
585 
586 	if (strlen(name) > O2NM_MAX_NAME_LEN)
587 		return ERR_PTR(-ENAMETOOLONG);
588 
589 	node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
590 	if (node == NULL)
591 		return ERR_PTR(-ENOMEM);
592 
593 	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
594 	config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
595 	spin_lock_init(&node->nd_lock);
596 
597 	mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name);
598 
599 	return &node->nd_item;
600 }
601 
/*
 * rmdir of a node directory: disconnect the node, stop the listener if
 * it was the local node, unpublish it from the address tree and node
 * table, then drop the final reference (o2nm_node_release frees it).
 */
static void o2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

	/* only tear down networking if this node actually made it into
	 * the node table (i.e. its "num" attribute was written) */
	if (cluster->cl_nodes[node->nd_num] == node) {
		o2net_disconnect_node(node);

		if (cluster->cl_has_local &&
		    (cluster->cl_local_node == node->nd_num)) {
			cluster->cl_has_local = 0;
			cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
			o2net_stop_listening(node);
		}
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	/* XXX sloppy */
	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n",
	     config_item_name(&node->nd_item));

	config_item_put(item);
}
639 
/* mkdir/rmdir under <cluster>/node/ create and destroy nodes. */
static struct configfs_group_operations o2nm_node_group_group_ops = {
	.make_item	= o2nm_node_group_make_item,
	.drop_item	= o2nm_node_group_drop_item,
};

static const struct config_item_type o2nm_node_group_type = {
	.ct_group_ops	= &o2nm_node_group_group_ops,
	.ct_owner	= THIS_MODULE,
};
649 
650 /* cluster */
651 
/* configfs release for the cluster directory: frees the allocation made
 * in o2nm_cluster_group_make_group() on the last reference drop. */
static void o2nm_cluster_release(struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	kfree(cluster);
}

static struct configfs_item_operations o2nm_cluster_item_ops = {
	.release	= o2nm_cluster_release,
};

/* Item type of the cluster directory itself. */
static const struct config_item_type o2nm_cluster_type = {
	.ct_item_ops	= &o2nm_cluster_item_ops,
	.ct_attrs	= o2nm_cluster_attrs,
	.ct_owner	= THIS_MODULE,
};
668 
669 /* cluster set */
670 
/* Wrapper for the configfs subsystem registered as the "cluster" root. */
struct o2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
	/* some stuff? */
};

#if 0
/* Currently unused; kept for symmetry with the other container_of
 * helpers. */
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
	       : NULL;
}
#endif
684 
/*
 * mkdir under the "cluster" root: create the one allowed cluster.
 * Allocates the cluster plus its "node" and heartbeat default groups,
 * wires them into configfs, and seeds the timeout tunables with their
 * defaults.  Returns -ENOSPC if a cluster already exists.
 */
static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
							  const char *name)
{
	struct o2nm_cluster *cluster = NULL;
	struct o2nm_node_group *ns = NULL;
	struct config_group *o2hb_group = NULL, *ret = NULL;

	/* this runs under the parent dir's i_mutex; there can be only
	 * one caller in here at a time */
	if (o2nm_single_cluster)
		return ERR_PTR(-ENOSPC);

	cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
	o2hb_group = o2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || o2hb_group == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &o2nm_cluster_type);
	configfs_add_default_group(&ns->ns_group, &cluster->cl_group);

	config_group_init_type_name(&ns->ns_group, "node",
				    &o2nm_node_group_type);
	configfs_add_default_group(o2hb_group, &cluster->cl_group);

	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms    = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
	cluster->cl_fence_method       = O2NM_FENCE_RESET;

	ret = &cluster->cl_group;
	o2nm_single_cluster = cluster;

out:
	if (ret == NULL) {
		/* partial-allocation cleanup: kfree(NULL) is a no-op and
		 * o2hb_free_hb_set() is presumably NULL-safe too --
		 * confirm against heartbeat.c */
		kfree(cluster);
		kfree(ns);
		o2hb_free_hb_set(o2hb_group);
		ret = ERR_PTR(-ENOMEM);
	}

	return ret;
}
731 
/* rmdir of the cluster directory: unpublish the singleton and drop the
 * reference; o2nm_cluster_release() frees the cluster. */
static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	BUG_ON(o2nm_single_cluster != cluster);
	o2nm_single_cluster = NULL;

	configfs_remove_default_groups(&cluster->cl_group);
	config_item_put(item);
}

static struct configfs_group_operations o2nm_cluster_group_group_ops = {
	.make_group	= o2nm_cluster_group_make_group,
	.drop_item	= o2nm_cluster_group_drop_item,
};

static const struct config_item_type o2nm_cluster_group_type = {
	.ct_group_ops	= &o2nm_cluster_group_group_ops,
	.ct_owner	= THIS_MODULE,
};
752 
/* The configfs subsystem itself; appears as the "cluster" directory
 * once registered in init_o2nm(). */
static struct o2nm_cluster_group o2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &o2nm_cluster_group_type,
			},
		},
	},
};
763 
/* Serialize against configfs directory operations: the ci_parent chain
 * read by to_o2nm_cluster_from_node() is stable under this mutex. */
static inline void o2nm_lock_subsystem(void)
{
	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

static inline void o2nm_unlock_subsystem(void)
{
	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

/* Pin a configfs item so its directory cannot be removed while a user
 * (e.g. a mounted filesystem) depends on it. */
int o2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}

/* Release a dependency taken with o2nm_depend_item(). */
void o2nm_undepend_item(struct config_item *item)
{
	configfs_undepend_item(item);
}
783 
/*
 * Take a configfs dependency on the local node so it cannot be removed
 * from the cluster configuration while in use.  Returns -EINVAL when
 * no local node is configured.
 */
int o2nm_depend_this_node(void)
{
	int ret = 0;
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	if (!local_node) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_depend_item(&local_node->nd_item);
	o2nm_node_put(local_node);

out:
	return ret;
}

/* Drop the dependency taken by o2nm_depend_this_node().  BUGs if the
 * local node has vanished -- that would mean an unbalanced call. */
void o2nm_undepend_this_node(void)
{
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	BUG_ON(!local_node);

	o2nm_undepend_item(&local_node->nd_item);
	o2nm_node_put(local_node);
}
812 
813 
/* Module exit: tear down in roughly the reverse order of init_o2nm(). */
static void __exit exit_o2nm(void)
{
	/* XXX sync with hb callbacks and shut down hb? */
	o2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
	o2cb_sys_shutdown();

	o2net_exit();
	o2hb_exit();
}
824 
/*
 * Module init: bring up heartbeat, networking, the hb callbacks, the
 * configfs subsystem and the o2cb sysfs bits, unwinding in reverse
 * order on any failure.  Note the inverted test after o2cb_sys_init():
 * success jumps straight to 'out', failure falls through to cleanup.
 */
static int __init init_o2nm(void)
{
	int ret;

	o2hb_init();

	ret = o2net_init();
	if (ret)
		goto out_o2hb;

	ret = o2net_register_hb_callbacks();
	if (ret)
		goto out_o2net;

	config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
	mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
	ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
	if (ret) {
		printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
		goto out_callbacks;
	}

	ret = o2cb_sys_init();
	if (!ret)
		goto out;

	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
	o2net_unregister_hb_callbacks();
out_o2net:
	o2net_exit();
out_o2hb:
	o2hb_exit();
out:
	return ret;
}
861 
862 MODULE_AUTHOR("Oracle");
863 MODULE_LICENSE("GPL");
864 MODULE_DESCRIPTION("OCFS2 cluster management");
865 
866 module_init(init_o2nm)
867 module_exit(exit_o2nm)
868