/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>

#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"
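
/*
 * Overview: this file implements the o2nm node manager configfs subsystem.
 * Userspace -- typically the o2cb init script from ocfs2-tools -- describes
 * the cluster by creating directories and writing attribute files under the
 * configfs mount point (conventionally /sys/kernel/config):
 *
 *	cluster/<name>/				the o2nm_cluster
 *	cluster/<name>/node/<nodename>/		one o2nm_node per member
 *	cluster/<name>/heartbeat/		group provided by heartbeat.c
 *
 * Node directories carry the num, ipv4_address, ipv4_port and local
 * attributes defined below; the cluster directory carries the timeout and
 * fence_method attributes.
 */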

/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

static const char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
	"reset",	/* O2NM_FENCE_RESET */
	"panic",	/* O2NM_FENCE_PANIC */
};
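
/*
 * The strings above are the accepted values of a cluster's fence_method
 * attribute: "reset" (the default) is expected to self-fence by resetting
 * the machine when quorum is lost, while "panic" calls panic() instead,
 * e.g. so that a crash dump can be captured.  The actual fencing is done by
 * the quorum code; this file only stores the chosen method.
 */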

static inline void o2nm_lock_subsystem(void);
static inline void o2nm_unlock_subsystem(void);

struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
	struct o2nm_node *node = NULL;

	if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
		goto out;

	read_lock(&o2nm_single_cluster->cl_nodes_lock);
	node = o2nm_single_cluster->cl_nodes[node_num];
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);
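
/*
 * o2nm_get_node_by_num() returns the node with a reference held on its
 * config item (config_item_get() above); callers must drop it again with
 * o2nm_node_put().  A minimal, illustrative caller:
 *
 *	struct o2nm_node *node = o2nm_get_node_by_num(node_num);
 *
 *	if (node) {
 *		pr_info("node %u is %s\n", node->nd_num, node->nd_name);
 *		o2nm_node_put(node);
 *	}
 */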

int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

	if (cluster == NULL)
		return -EINVAL;

	read_lock(&cluster->cl_nodes_lock);
	memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
	read_unlock(&cluster->cl_nodes_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
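
/*
 * The destination must be at least as large as cl_nodes_bitmap, i.e. a
 * bitmap of O2NM_MAX_NODES bits, or the BUG_ON() above fires.  A typical,
 * illustrative caller:
 *
 *	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *	if (o2nm_configured_node_map(map, sizeof(map)))
 *		return;
 *
 * where a non-zero return means no cluster has been configured yet.
 */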

static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
						  __be32 ip_needle,
						  struct rb_node ***ret_p,
						  struct rb_node **ret_parent)
{
	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
	struct rb_node *parent = NULL;
	struct o2nm_node *node, *ret = NULL;

	while (*p) {
		int cmp;

		parent = *p;
		node = rb_entry(parent, struct o2nm_node, nd_ip_node);

		cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
				sizeof(ip_needle));
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else {
			ret = node;
			break;
		}
	}

	if (ret_p != NULL)
		*ret_p = p;
	if (ret_parent != NULL)
		*ret_parent = parent;

	return ret;
}

struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
	struct o2nm_node *node = NULL;
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	if (cluster == NULL)
		goto out;

	read_lock(&cluster->cl_nodes_lock);
	node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&cluster->cl_nodes_lock);

out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);

void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);

u8 o2nm_this_node(void)
{
	u8 node_num = O2NM_MAX_NODES;

	if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
		node_num = o2nm_single_cluster->cl_local_node;

	return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);
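
/*
 * o2nm_this_node() returns O2NM_MAX_NODES when no node in the cluster has
 * been marked local yet, so callers need to treat that value as "not
 * configured", e.g. (illustrative only):
 *
 *	u8 node_num = o2nm_this_node();
 *
 *	if (node_num == O2NM_MAX_NODES)
 *		return -EINVAL;
 */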

/* node configfs bits */

static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
	return item ?
		container_of(to_config_group(item), struct o2nm_cluster,
			     cl_group)
		: NULL;
}

static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
	return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}

static void o2nm_node_release(struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	kfree(node);
}

static ssize_t o2nm_node_num_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num);
}

static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
	/* through the first node_set .parent
	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
	if (node->nd_item.ci_parent)
		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
	else
		return NULL;
}

enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
};
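
/*
 * nd_set_attributes records which of the attributes above have been written.
 * Each store handler below uses test_and_set_bit(), so num, ipv4_address and
 * ipv4_port are effectively write-once: a second write returns -EBUSY.
 */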

static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	int ret = 0;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		o2nm_unlock_subsystem();
		return -EINVAL;
	}

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		ret = -EEXIST;
	else if (test_and_set_bit(O2NM_NODE_ATTR_NUM,
			&node->nd_set_attributes))
		ret = -EBUSY;
	else  {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	o2nm_unlock_subsystem();

	if (ret)
		return ret;

	return count;
}
static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port));
}

static ssize_t o2nm_node_ipv4_port_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u16)-1)
		return -ERANGE;

	if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EBUSY;
	node->nd_ipv4_port = htons(tmp);

	return count;
}

static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address);
}

static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
					    const char *page,
					    size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}
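
	/*
	 * sscanf() put the first dotted quad into octets[3] and the last into
	 * octets[0], so the additions above place the first octet in the most
	 * significant byte of the host-order value; be32_add_cpu() keeps
	 * ipv4_addr in big-endian (network) byte order throughout.
	 */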

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		o2nm_unlock_subsystem();
		return -EINVAL;
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS,
			&node->nd_set_attributes))
		ret = -EBUSY;
	else {
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	o2nm_unlock_subsystem();

	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}

static ssize_t o2nm_node_local_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local);
}

static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		ret = -EINVAL;
		goto out;
	}

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num) {
		ret = -EBUSY;
		goto out;
	}

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			goto out;
	}

	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	ret = count;

out:
	o2nm_unlock_subsystem();
	return ret;
}
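
/*
 * CONFIGFS_ATTR(o2nm_node_, num) and friends generate the struct
 * configfs_attribute objects (o2nm_node_attr_num, ...) that wire the _show
 * and _store handlers above to files of the same name in each node's
 * configfs directory.
 */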

CONFIGFS_ATTR(o2nm_node_, num);
CONFIGFS_ATTR(o2nm_node_, ipv4_port);
CONFIGFS_ATTR(o2nm_node_, ipv4_address);
CONFIGFS_ATTR(o2nm_node_, local);

static struct configfs_attribute *o2nm_node_attrs[] = {
	&o2nm_node_attr_num,
	&o2nm_node_attr_ipv4_port,
	&o2nm_node_attr_ipv4_address,
	&o2nm_node_attr_local,
	NULL,
};

static struct configfs_item_operations o2nm_node_item_ops = {
	.release		= o2nm_node_release,
};

static const struct config_item_type o2nm_node_type = {
	.ct_item_ops	= &o2nm_node_item_ops,
	.ct_attrs	= o2nm_node_attrs,
	.ct_owner	= THIS_MODULE,
};

/* node set */

struct o2nm_node_group {
	struct config_group ns_group;
	/* some stuff? */
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif
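
/*
 * Shared parser for the millisecond-valued cluster attributes below.  It
 * accepts any value that simple_strtoul() understands (base 0, so decimal,
 * octal or hex), rejecting 0 with -EINVAL and anything >= U32_MAX with
 * -ERANGE.
 */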

static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
                                       unsigned int *val)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u32)-1)
		return -ERANGE;

	*val = tmp;

	return count;
}

static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item,
	char *page)
{
	return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms);
}

static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item,
	const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret;
	unsigned int val;

	ret =  o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_idle_timeout_ms != val
			&& o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change idle timeout after "
			     "the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val <= cluster->cl_keepalive_delay_ms) {
			mlog(ML_NOTICE, "o2net: idle timeout must be larger "
			     "than keepalive delay\n");
			ret = -EINVAL;
		} else {
			cluster->cl_idle_timeout_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_keepalive_delay_ms_show(
	struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
			to_o2nm_cluster(item)->cl_keepalive_delay_ms);
}

static ssize_t o2nm_cluster_keepalive_delay_ms_store(
	struct config_item *item, const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret;
	unsigned int val;

	ret =  o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_keepalive_delay_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change keepalive delay after"
			     " the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val >= cluster->cl_idle_timeout_ms) {
			mlog(ML_NOTICE, "o2net: keepalive delay must be "
			     "smaller than idle timeout\n");
			ret = -EINVAL;
		} else {
			cluster->cl_keepalive_delay_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_reconnect_delay_ms_show(
	struct config_item *item, char *page)
{
	return sprintf(page, "%u\n",
			to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_reconnect_delay_ms_store(
	struct config_item *item, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
                               &to_o2nm_cluster(item)->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_fence_method_show(
	struct config_item *item, char *page)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	ssize_t ret = 0;

	if (cluster)
		ret = sprintf(page, "%s\n",
			      o2nm_fence_method_desc[cluster->cl_fence_method]);
	return ret;
}

static ssize_t o2nm_cluster_fence_method_store(
	struct config_item *item, const char *page, size_t count)
{
	unsigned int i;

	if (page[count - 1] != '\n')
		goto bail;

	for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
		if (count != strlen(o2nm_fence_method_desc[i]) + 1)
			continue;
		if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
			continue;
		if (to_o2nm_cluster(item)->cl_fence_method != i) {
			printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
			       o2nm_fence_method_desc[i]);
			to_o2nm_cluster(item)->cl_fence_method = i;
		}
		return count;
	}

bail:
	return -EINVAL;
}

CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms);
CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms);
CONFIGFS_ATTR(o2nm_cluster_, fence_method);

static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms,
	&o2nm_cluster_attr_keepalive_delay_ms,
	&o2nm_cluster_attr_reconnect_delay_ms,
	&o2nm_cluster_attr_fence_method,
	NULL,
};
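
/*
 * make_item is invoked by configfs when userspace creates a directory under
 * a cluster's "node" group, for example (assuming the conventional mount
 * point):
 *
 *	mkdir /sys/kernel/config/cluster/mycluster/node/node1
 *
 * The directory name becomes the node name and may be at most
 * O2NM_MAX_NAME_LEN characters; drop_item below undoes the registration on
 * rmdir.
 */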

static struct config_item *o2nm_node_group_make_item(struct config_group *group,
						     const char *name)
{
	struct o2nm_node *node = NULL;

	if (strlen(name) > O2NM_MAX_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
	if (node == NULL)
		return ERR_PTR(-ENOMEM);

	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
	config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
	spin_lock_init(&node->nd_lock);

	mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name);

	return &node->nd_item;
}

static void o2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

	if (cluster->cl_nodes[node->nd_num] == node) {
		o2net_disconnect_node(node);

		if (cluster->cl_has_local &&
		    (cluster->cl_local_node == node->nd_num)) {
			cluster->cl_has_local = 0;
			cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
			o2net_stop_listening(node);
		}
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	/* XXX sloppy */
	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n",
	     config_item_name(&node->nd_item));

	config_item_put(item);
}

static struct configfs_group_operations o2nm_node_group_group_ops = {
	.make_item	= o2nm_node_group_make_item,
	.drop_item	= o2nm_node_group_drop_item,
};

static const struct config_item_type o2nm_node_group_type = {
	.ct_group_ops	= &o2nm_node_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

/* cluster */

static void o2nm_cluster_release(struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	kfree(cluster);
}

static struct configfs_item_operations o2nm_cluster_item_ops = {
	.release	= o2nm_cluster_release,
};

static const struct config_item_type o2nm_cluster_type = {
	.ct_item_ops	= &o2nm_cluster_item_ops,
	.ct_attrs	= o2nm_cluster_attrs,
	.ct_owner	= THIS_MODULE,
};

/* cluster set */

struct o2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
	/* some stuff? */
};

#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
	       : NULL;
}
#endif
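
/*
 * make_group is invoked when userspace creates the cluster directory itself,
 * e.g. "mkdir /sys/kernel/config/cluster/mycluster" with the conventional
 * mount point.  Only one cluster may exist at a time; a second mkdir fails
 * with -ENOSPC.  The new cluster is populated with two default child groups:
 * "node" for the o2nm_node items and the heartbeat group allocated by
 * o2hb_alloc_hb_set().
 */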

static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
							  const char *name)
{
	struct o2nm_cluster *cluster = NULL;
	struct o2nm_node_group *ns = NULL;
	struct config_group *o2hb_group = NULL, *ret = NULL;

	/* this runs under the parent dir's i_mutex; there can be only
	 * one caller in here at a time */
	if (o2nm_single_cluster)
		return ERR_PTR(-ENOSPC);

	cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
	o2hb_group = o2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || o2hb_group == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &o2nm_cluster_type);
	configfs_add_default_group(&ns->ns_group, &cluster->cl_group);

	config_group_init_type_name(&ns->ns_group, "node",
				    &o2nm_node_group_type);
	configfs_add_default_group(o2hb_group, &cluster->cl_group);

	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms    = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
	cluster->cl_fence_method       = O2NM_FENCE_RESET;

	ret = &cluster->cl_group;
	o2nm_single_cluster = cluster;

out:
	if (ret == NULL) {
		kfree(cluster);
		kfree(ns);
		o2hb_free_hb_set(o2hb_group);
		ret = ERR_PTR(-ENOMEM);
	}

	return ret;
}

static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	BUG_ON(o2nm_single_cluster != cluster);
	o2nm_single_cluster = NULL;

	configfs_remove_default_groups(&cluster->cl_group);
	config_item_put(item);
}

static struct configfs_group_operations o2nm_cluster_group_group_ops = {
	.make_group	= o2nm_cluster_group_make_group,
	.drop_item	= o2nm_cluster_group_drop_item,
};

static const struct config_item_type o2nm_cluster_group_type = {
	.ct_group_ops	= &o2nm_cluster_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct o2nm_cluster_group o2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &o2nm_cluster_group_type,
			},
		},
	},
};

static inline void o2nm_lock_subsystem(void)
{
	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

static inline void o2nm_unlock_subsystem(void)
{
	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

int o2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}

void o2nm_undepend_item(struct config_item *item)
{
	configfs_undepend_item(item);
}

int o2nm_depend_this_node(void)
{
	int ret = 0;
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	if (!local_node) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_depend_item(&local_node->nd_item);
	o2nm_node_put(local_node);

out:
	return ret;
}

void o2nm_undepend_this_node(void)
{
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	BUG_ON(!local_node);

	o2nm_undepend_item(&local_node->nd_item);
	o2nm_node_put(local_node);
}


static void __exit exit_o2nm(void)
{
	/* XXX sync with hb callbacks and shut down hb? */
	o2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
	o2cb_sys_shutdown();

	o2net_exit();
	o2hb_exit();
}

static int __init init_o2nm(void)
{
	int ret = -1;

	ret = o2hb_init();
	if (ret)
		goto out;

	ret = o2net_init();
	if (ret)
		goto out_o2hb;

	ret = o2net_register_hb_callbacks();
	if (ret)
		goto out_o2net;

	config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
	mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
	ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
	if (ret) {
		printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
		goto out_callbacks;
	}

	ret = o2cb_sys_init();
	if (!ret)
		goto out;
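	/* only reached if o2cb_sys_init() failed: unwind the registration */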

	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
	o2net_unregister_hb_callbacks();
out_o2net:
	o2net_exit();
out_o2hb:
	o2hb_exit();
out:
	return ret;
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 cluster management");

module_init(init_o2nm)
module_exit(exit_o2nm)