1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * Copyright (C) 2004, 2005 Oracle. All rights reserved. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public 17 * License along with this program; if not, write to the 18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 19 * Boston, MA 021110-1307, USA. 20 */ 21 22 #include <linux/kernel.h> 23 #include <linux/sched.h> 24 #include <linux/jiffies.h> 25 #include <linux/module.h> 26 #include <linux/fs.h> 27 #include <linux/bio.h> 28 #include <linux/blkdev.h> 29 #include <linux/delay.h> 30 #include <linux/file.h> 31 #include <linux/kthread.h> 32 #include <linux/configfs.h> 33 #include <linux/random.h> 34 #include <linux/crc32.h> 35 #include <linux/time.h> 36 #include <linux/debugfs.h> 37 #include <linux/slab.h> 38 #include <linux/bitmap.h> 39 #include <linux/ktime.h> 40 #include "heartbeat.h" 41 #include "tcp.h" 42 #include "nodemanager.h" 43 #include "quorum.h" 44 45 #include "masklog.h" 46 47 48 /* 49 * The first heartbeat pass had one global thread that would serialize all hb 50 * callback calls. This global serializing sem should only be removed once 51 * we've made sure that all callees can deal with being called concurrently 52 * from multiple hb region threads. 53 */ 54 static DECLARE_RWSEM(o2hb_callback_sem); 55 56 /* 57 * multiple hb threads are watching multiple regions. A node is live 58 * whenever any of the threads sees activity from the node in its region. 59 */ 60 static DEFINE_SPINLOCK(o2hb_live_lock); 61 static struct list_head o2hb_live_slots[O2NM_MAX_NODES]; 62 static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 63 static LIST_HEAD(o2hb_node_events); 64 static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue); 65 66 /* 67 * In global heartbeat, we maintain a series of region bitmaps. 68 * - o2hb_region_bitmap allows us to limit the region number to max region. 69 * - o2hb_live_region_bitmap tracks live regions (seen steady iterations). 70 * - o2hb_quorum_region_bitmap tracks live regions that have seen all nodes 71 * heartbeat on it. 72 * - o2hb_failed_region_bitmap tracks the regions that have seen io timeouts. 73 */ 74 static unsigned long o2hb_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)]; 75 static unsigned long o2hb_live_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)]; 76 static unsigned long o2hb_quorum_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)]; 77 static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)]; 78 79 #define O2HB_DB_TYPE_LIVENODES 0 80 #define O2HB_DB_TYPE_LIVEREGIONS 1 81 #define O2HB_DB_TYPE_QUORUMREGIONS 2 82 #define O2HB_DB_TYPE_FAILEDREGIONS 3 83 #define O2HB_DB_TYPE_REGION_LIVENODES 4 84 #define O2HB_DB_TYPE_REGION_NUMBER 5 85 #define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6 86 #define O2HB_DB_TYPE_REGION_PINNED 7 87 struct o2hb_debug_buf { 88 int db_type; 89 int db_size; 90 int db_len; 91 void *db_data; 92 }; 93 94 static struct o2hb_debug_buf *o2hb_db_livenodes; 95 static struct o2hb_debug_buf *o2hb_db_liveregions; 96 static struct o2hb_debug_buf *o2hb_db_quorumregions; 97 static struct o2hb_debug_buf *o2hb_db_failedregions; 98 99 #define O2HB_DEBUG_DIR "o2hb" 100 #define O2HB_DEBUG_LIVENODES "livenodes" 101 #define O2HB_DEBUG_LIVEREGIONS "live_regions" 102 #define O2HB_DEBUG_QUORUMREGIONS "quorum_regions" 103 #define O2HB_DEBUG_FAILEDREGIONS "failed_regions" 104 #define O2HB_DEBUG_REGION_NUMBER "num" 105 #define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms" 106 #define O2HB_DEBUG_REGION_PINNED "pinned" 107 108 static struct dentry *o2hb_debug_dir; 109 static struct dentry *o2hb_debug_livenodes; 110 static struct dentry *o2hb_debug_liveregions; 111 static struct dentry *o2hb_debug_quorumregions; 112 static struct dentry *o2hb_debug_failedregions; 113 114 static LIST_HEAD(o2hb_all_regions); 115 116 static struct o2hb_callback { 117 struct list_head list; 118 } o2hb_callbacks[O2HB_NUM_CB]; 119 120 static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); 121 122 #define O2HB_DEFAULT_BLOCK_BITS 9 123 124 enum o2hb_heartbeat_modes { 125 O2HB_HEARTBEAT_LOCAL = 0, 126 O2HB_HEARTBEAT_GLOBAL, 127 O2HB_HEARTBEAT_NUM_MODES, 128 }; 129 130 char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = { 131 "local", /* O2HB_HEARTBEAT_LOCAL */ 132 "global", /* O2HB_HEARTBEAT_GLOBAL */ 133 }; 134 135 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; 136 unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL; 137 138 /* 139 * o2hb_dependent_users tracks the number of registered callbacks that depend 140 * on heartbeat. o2net and o2dlm are two entities that register this callback. 141 * However only o2dlm depends on the heartbeat. It does not want the heartbeat 142 * to stop while a dlm domain is still active. 143 */ 144 unsigned int o2hb_dependent_users; 145 146 /* 147 * In global heartbeat mode, all regions are pinned if there are one or more 148 * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All 149 * regions are unpinned if the region count exceeds the cut off or the number 150 * of dependent users falls to zero. 151 */ 152 #define O2HB_PIN_CUT_OFF 3 153 154 /* 155 * In local heartbeat mode, we assume the dlm domain name to be the same as 156 * region uuid. This is true for domains created for the file system but not 157 * necessarily true for userdlm domains. This is a known limitation. 158 * 159 * In global heartbeat mode, we pin/unpin all o2hb regions. This solution 160 * works for both file system and userdlm domains. 161 */ 162 static int o2hb_region_pin(const char *region_uuid); 163 static void o2hb_region_unpin(const char *region_uuid); 164 165 /* Only sets a new threshold if there are no active regions. 166 * 167 * No locking or otherwise interesting code is required for reading 168 * o2hb_dead_threshold as it can't change once regions are active and 169 * it's not interesting to anyone until then anyway. */ 170 static void o2hb_dead_threshold_set(unsigned int threshold) 171 { 172 if (threshold > O2HB_MIN_DEAD_THRESHOLD) { 173 spin_lock(&o2hb_live_lock); 174 if (list_empty(&o2hb_all_regions)) 175 o2hb_dead_threshold = threshold; 176 spin_unlock(&o2hb_live_lock); 177 } 178 } 179 180 static int o2hb_global_heartbeat_mode_set(unsigned int hb_mode) 181 { 182 int ret = -1; 183 184 if (hb_mode < O2HB_HEARTBEAT_NUM_MODES) { 185 spin_lock(&o2hb_live_lock); 186 if (list_empty(&o2hb_all_regions)) { 187 o2hb_heartbeat_mode = hb_mode; 188 ret = 0; 189 } 190 spin_unlock(&o2hb_live_lock); 191 } 192 193 return ret; 194 } 195 196 struct o2hb_node_event { 197 struct list_head hn_item; 198 enum o2hb_callback_type hn_event_type; 199 struct o2nm_node *hn_node; 200 int hn_node_num; 201 }; 202 203 struct o2hb_disk_slot { 204 struct o2hb_disk_heartbeat_block *ds_raw_block; 205 u8 ds_node_num; 206 u64 ds_last_time; 207 u64 ds_last_generation; 208 u16 ds_equal_samples; 209 u16 ds_changed_samples; 210 struct list_head ds_live_item; 211 }; 212 213 /* each thread owns a region.. when we're asked to tear down the region 214 * we ask the thread to stop, who cleans up the region */ 215 struct o2hb_region { 216 struct config_item hr_item; 217 218 struct list_head hr_all_item; 219 unsigned hr_unclean_stop:1, 220 hr_aborted_start:1, 221 hr_item_pinned:1, 222 hr_item_dropped:1, 223 hr_node_deleted:1; 224 225 /* protected by the hr_callback_sem */ 226 struct task_struct *hr_task; 227 228 unsigned int hr_blocks; 229 unsigned long long hr_start_block; 230 231 unsigned int hr_block_bits; 232 unsigned int hr_block_bytes; 233 234 unsigned int hr_slots_per_page; 235 unsigned int hr_num_pages; 236 237 struct page **hr_slot_data; 238 struct block_device *hr_bdev; 239 struct o2hb_disk_slot *hr_slots; 240 241 /* live node map of this region */ 242 unsigned long hr_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 243 unsigned int hr_region_num; 244 245 struct dentry *hr_debug_dir; 246 struct dentry *hr_debug_livenodes; 247 struct dentry *hr_debug_regnum; 248 struct dentry *hr_debug_elapsed_time; 249 struct dentry *hr_debug_pinned; 250 struct o2hb_debug_buf *hr_db_livenodes; 251 struct o2hb_debug_buf *hr_db_regnum; 252 struct o2hb_debug_buf *hr_db_elapsed_time; 253 struct o2hb_debug_buf *hr_db_pinned; 254 255 /* let the person setting up hb wait for it to return until it 256 * has reached a 'steady' state. This will be fixed when we have 257 * a more complete api that doesn't lead to this sort of fragility. */ 258 atomic_t hr_steady_iterations; 259 260 /* terminate o2hb thread if it does not reach steady state 261 * (hr_steady_iterations == 0) within hr_unsteady_iterations */ 262 atomic_t hr_unsteady_iterations; 263 264 char hr_dev_name[BDEVNAME_SIZE]; 265 266 unsigned int hr_timeout_ms; 267 268 /* randomized as the region goes up and down so that a node 269 * recognizes a node going up and down in one iteration */ 270 u64 hr_generation; 271 272 struct delayed_work hr_write_timeout_work; 273 unsigned long hr_last_timeout_start; 274 275 /* negotiate timer, used to negotiate extending hb timeout. */ 276 struct delayed_work hr_nego_timeout_work; 277 unsigned long hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 278 279 /* Used during o2hb_check_slot to hold a copy of the block 280 * being checked because we temporarily have to zero out the 281 * crc field. */ 282 struct o2hb_disk_heartbeat_block *hr_tmp_block; 283 284 /* Message key for negotiate timeout message. */ 285 unsigned int hr_key; 286 struct list_head hr_handler_list; 287 288 /* last hb status, 0 for success, other value for error. */ 289 int hr_last_hb_status; 290 }; 291 292 struct o2hb_bio_wait_ctxt { 293 atomic_t wc_num_reqs; 294 struct completion wc_io_complete; 295 int wc_error; 296 }; 297 298 #define O2HB_NEGO_TIMEOUT_MS (O2HB_MAX_WRITE_TIMEOUT_MS/2) 299 300 enum { 301 O2HB_NEGO_TIMEOUT_MSG = 1, 302 O2HB_NEGO_APPROVE_MSG = 2, 303 }; 304 305 struct o2hb_nego_msg { 306 u8 node_num; 307 }; 308 309 static void o2hb_write_timeout(struct work_struct *work) 310 { 311 int failed, quorum; 312 struct o2hb_region *reg = 313 container_of(work, struct o2hb_region, 314 hr_write_timeout_work.work); 315 316 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 317 "milliseconds\n", reg->hr_dev_name, 318 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); 319 320 if (o2hb_global_heartbeat_active()) { 321 spin_lock(&o2hb_live_lock); 322 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) 323 set_bit(reg->hr_region_num, o2hb_failed_region_bitmap); 324 failed = bitmap_weight(o2hb_failed_region_bitmap, 325 O2NM_MAX_REGIONS); 326 quorum = bitmap_weight(o2hb_quorum_region_bitmap, 327 O2NM_MAX_REGIONS); 328 spin_unlock(&o2hb_live_lock); 329 330 mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n", 331 quorum, failed); 332 333 /* 334 * Fence if the number of failed regions >= half the number 335 * of quorum regions 336 */ 337 if ((failed << 1) < quorum) 338 return; 339 } 340 341 o2quo_disk_timeout(); 342 } 343 344 static void o2hb_arm_timeout(struct o2hb_region *reg) 345 { 346 /* Arm writeout only after thread reaches steady state */ 347 if (atomic_read(®->hr_steady_iterations) != 0) 348 return; 349 350 mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", 351 O2HB_MAX_WRITE_TIMEOUT_MS); 352 353 if (o2hb_global_heartbeat_active()) { 354 spin_lock(&o2hb_live_lock); 355 clear_bit(reg->hr_region_num, o2hb_failed_region_bitmap); 356 spin_unlock(&o2hb_live_lock); 357 } 358 cancel_delayed_work(®->hr_write_timeout_work); 359 schedule_delayed_work(®->hr_write_timeout_work, 360 msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS)); 361 362 cancel_delayed_work(®->hr_nego_timeout_work); 363 /* negotiate timeout must be less than write timeout. */ 364 schedule_delayed_work(®->hr_nego_timeout_work, 365 msecs_to_jiffies(O2HB_NEGO_TIMEOUT_MS)); 366 memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap)); 367 } 368 369 static void o2hb_disarm_timeout(struct o2hb_region *reg) 370 { 371 cancel_delayed_work_sync(®->hr_write_timeout_work); 372 cancel_delayed_work_sync(®->hr_nego_timeout_work); 373 } 374 375 static int o2hb_send_nego_msg(int key, int type, u8 target) 376 { 377 struct o2hb_nego_msg msg; 378 int status, ret; 379 380 msg.node_num = o2nm_this_node(); 381 again: 382 ret = o2net_send_message(type, key, &msg, sizeof(msg), 383 target, &status); 384 385 if (ret == -EAGAIN || ret == -ENOMEM) { 386 msleep(100); 387 goto again; 388 } 389 390 return ret; 391 } 392 393 static void o2hb_nego_timeout(struct work_struct *work) 394 { 395 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 396 int master_node, i, ret; 397 struct o2hb_region *reg; 398 399 reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work); 400 /* don't negotiate timeout if last hb failed since it is very 401 * possible io failed. Should let write timeout fence self. 402 */ 403 if (reg->hr_last_hb_status) 404 return; 405 406 o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap)); 407 /* lowest node as master node to make negotiate decision. */ 408 master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0); 409 410 if (master_node == o2nm_this_node()) { 411 if (!test_bit(master_node, reg->hr_nego_node_bitmap)) { 412 printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s).\n", 413 o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, 414 config_item_name(®->hr_item), reg->hr_dev_name); 415 set_bit(master_node, reg->hr_nego_node_bitmap); 416 } 417 if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap, 418 sizeof(reg->hr_nego_node_bitmap))) { 419 /* check negotiate bitmap every second to do timeout 420 * approve decision. 421 */ 422 schedule_delayed_work(®->hr_nego_timeout_work, 423 msecs_to_jiffies(1000)); 424 425 return; 426 } 427 428 printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%s) is down.\n", 429 config_item_name(®->hr_item), reg->hr_dev_name); 430 /* approve negotiate timeout request. */ 431 o2hb_arm_timeout(reg); 432 433 i = -1; 434 while ((i = find_next_bit(live_node_bitmap, 435 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 436 if (i == master_node) 437 continue; 438 439 mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i); 440 ret = o2hb_send_nego_msg(reg->hr_key, 441 O2HB_NEGO_APPROVE_MSG, i); 442 if (ret) 443 mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n", 444 i, ret); 445 } 446 } else { 447 /* negotiate timeout with master node. */ 448 printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s), negotiate timeout with node %d.\n", 449 o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(®->hr_item), 450 reg->hr_dev_name, master_node); 451 ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG, 452 master_node); 453 if (ret) 454 mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n", 455 master_node, ret); 456 } 457 } 458 459 static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data, 460 void **ret_data) 461 { 462 struct o2hb_region *reg = data; 463 struct o2hb_nego_msg *nego_msg; 464 465 nego_msg = (struct o2hb_nego_msg *)msg->buf; 466 printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%s).\n", 467 nego_msg->node_num, config_item_name(®->hr_item), reg->hr_dev_name); 468 if (nego_msg->node_num < O2NM_MAX_NODES) 469 set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap); 470 else 471 mlog(ML_ERROR, "got nego timeout message from bad node.\n"); 472 473 return 0; 474 } 475 476 static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data, 477 void **ret_data) 478 { 479 struct o2hb_region *reg = data; 480 481 printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%s).\n", 482 config_item_name(®->hr_item), reg->hr_dev_name); 483 o2hb_arm_timeout(reg); 484 return 0; 485 } 486 487 static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc) 488 { 489 atomic_set(&wc->wc_num_reqs, 1); 490 init_completion(&wc->wc_io_complete); 491 wc->wc_error = 0; 492 } 493 494 /* Used in error paths too */ 495 static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc, 496 unsigned int num) 497 { 498 /* sadly atomic_sub_and_test() isn't available on all platforms. The 499 * good news is that the fast path only completes one at a time */ 500 while(num--) { 501 if (atomic_dec_and_test(&wc->wc_num_reqs)) { 502 BUG_ON(num > 0); 503 complete(&wc->wc_io_complete); 504 } 505 } 506 } 507 508 static void o2hb_wait_on_io(struct o2hb_bio_wait_ctxt *wc) 509 { 510 o2hb_bio_wait_dec(wc, 1); 511 wait_for_completion(&wc->wc_io_complete); 512 } 513 514 static void o2hb_bio_end_io(struct bio *bio) 515 { 516 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; 517 518 if (bio->bi_status) { 519 mlog(ML_ERROR, "IO Error %d\n", bio->bi_status); 520 wc->wc_error = blk_status_to_errno(bio->bi_status); 521 } 522 523 o2hb_bio_wait_dec(wc, 1); 524 bio_put(bio); 525 } 526 527 /* Setup a Bio to cover I/O against num_slots slots starting at 528 * start_slot. */ 529 static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, 530 struct o2hb_bio_wait_ctxt *wc, 531 unsigned int *current_slot, 532 unsigned int max_slots, int op, 533 int op_flags) 534 { 535 int len, current_page; 536 unsigned int vec_len, vec_start; 537 unsigned int bits = reg->hr_block_bits; 538 unsigned int spp = reg->hr_slots_per_page; 539 unsigned int cs = *current_slot; 540 struct bio *bio; 541 struct page *page; 542 543 /* Testing has shown this allocation to take long enough under 544 * GFP_KERNEL that the local node can get fenced. It would be 545 * nicest if we could pre-allocate these bios and avoid this 546 * all together. */ 547 bio = bio_alloc(GFP_ATOMIC, 16); 548 if (!bio) { 549 mlog(ML_ERROR, "Could not alloc slots BIO!\n"); 550 bio = ERR_PTR(-ENOMEM); 551 goto bail; 552 } 553 554 /* Must put everything in 512 byte sectors for the bio... */ 555 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); 556 bio_set_dev(bio, reg->hr_bdev); 557 bio->bi_private = wc; 558 bio->bi_end_io = o2hb_bio_end_io; 559 bio_set_op_attrs(bio, op, op_flags); 560 561 vec_start = (cs << bits) % PAGE_SIZE; 562 while(cs < max_slots) { 563 current_page = cs / spp; 564 page = reg->hr_slot_data[current_page]; 565 566 vec_len = min(PAGE_SIZE - vec_start, 567 (max_slots-cs) * (PAGE_SIZE/spp) ); 568 569 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", 570 current_page, vec_len, vec_start); 571 572 len = bio_add_page(bio, page, vec_len, vec_start); 573 if (len != vec_len) break; 574 575 cs += vec_len / (PAGE_SIZE/spp); 576 vec_start = 0; 577 } 578 579 bail: 580 *current_slot = cs; 581 return bio; 582 } 583 584 static int o2hb_read_slots(struct o2hb_region *reg, 585 unsigned int max_slots) 586 { 587 unsigned int current_slot=0; 588 int status; 589 struct o2hb_bio_wait_ctxt wc; 590 struct bio *bio; 591 592 o2hb_bio_wait_init(&wc); 593 594 while(current_slot < max_slots) { 595 bio = o2hb_setup_one_bio(reg, &wc, ¤t_slot, max_slots, 596 REQ_OP_READ, 0); 597 if (IS_ERR(bio)) { 598 status = PTR_ERR(bio); 599 mlog_errno(status); 600 goto bail_and_wait; 601 } 602 603 atomic_inc(&wc.wc_num_reqs); 604 submit_bio(bio); 605 } 606 607 status = 0; 608 609 bail_and_wait: 610 o2hb_wait_on_io(&wc); 611 if (wc.wc_error && !status) 612 status = wc.wc_error; 613 614 return status; 615 } 616 617 static int o2hb_issue_node_write(struct o2hb_region *reg, 618 struct o2hb_bio_wait_ctxt *write_wc) 619 { 620 int status; 621 unsigned int slot; 622 struct bio *bio; 623 624 o2hb_bio_wait_init(write_wc); 625 626 slot = o2nm_this_node(); 627 628 bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE, 629 REQ_SYNC); 630 if (IS_ERR(bio)) { 631 status = PTR_ERR(bio); 632 mlog_errno(status); 633 goto bail; 634 } 635 636 atomic_inc(&write_wc->wc_num_reqs); 637 submit_bio(bio); 638 639 status = 0; 640 bail: 641 return status; 642 } 643 644 static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg, 645 struct o2hb_disk_heartbeat_block *hb_block) 646 { 647 __le32 old_cksum; 648 u32 ret; 649 650 /* We want to compute the block crc with a 0 value in the 651 * hb_cksum field. Save it off here and replace after the 652 * crc. */ 653 old_cksum = hb_block->hb_cksum; 654 hb_block->hb_cksum = 0; 655 656 ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes); 657 658 hb_block->hb_cksum = old_cksum; 659 660 return ret; 661 } 662 663 static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block) 664 { 665 mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, " 666 "cksum = 0x%x, generation 0x%llx\n", 667 (long long)le64_to_cpu(hb_block->hb_seq), 668 hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum), 669 (long long)le64_to_cpu(hb_block->hb_generation)); 670 } 671 672 static int o2hb_verify_crc(struct o2hb_region *reg, 673 struct o2hb_disk_heartbeat_block *hb_block) 674 { 675 u32 read, computed; 676 677 read = le32_to_cpu(hb_block->hb_cksum); 678 computed = o2hb_compute_block_crc_le(reg, hb_block); 679 680 return read == computed; 681 } 682 683 /* 684 * Compare the slot data with what we wrote in the last iteration. 685 * If the match fails, print an appropriate error message. This is to 686 * detect errors like... another node hearting on the same slot, 687 * flaky device that is losing writes, etc. 688 * Returns 1 if check succeeds, 0 otherwise. 689 */ 690 static int o2hb_check_own_slot(struct o2hb_region *reg) 691 { 692 struct o2hb_disk_slot *slot; 693 struct o2hb_disk_heartbeat_block *hb_block; 694 char *errstr; 695 696 slot = ®->hr_slots[o2nm_this_node()]; 697 /* Don't check on our 1st timestamp */ 698 if (!slot->ds_last_time) 699 return 0; 700 701 hb_block = slot->ds_raw_block; 702 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && 703 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && 704 hb_block->hb_node == slot->ds_node_num) 705 return 1; 706 707 #define ERRSTR1 "Another node is heartbeating on device" 708 #define ERRSTR2 "Heartbeat generation mismatch on device" 709 #define ERRSTR3 "Heartbeat sequence mismatch on device" 710 711 if (hb_block->hb_node != slot->ds_node_num) 712 errstr = ERRSTR1; 713 else if (le64_to_cpu(hb_block->hb_generation) != 714 slot->ds_last_generation) 715 errstr = ERRSTR2; 716 else 717 errstr = ERRSTR3; 718 719 mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), " 720 "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name, 721 slot->ds_node_num, (unsigned long long)slot->ds_last_generation, 722 (unsigned long long)slot->ds_last_time, hb_block->hb_node, 723 (unsigned long long)le64_to_cpu(hb_block->hb_generation), 724 (unsigned long long)le64_to_cpu(hb_block->hb_seq)); 725 726 return 0; 727 } 728 729 static inline void o2hb_prepare_block(struct o2hb_region *reg, 730 u64 generation) 731 { 732 int node_num; 733 u64 cputime; 734 struct o2hb_disk_slot *slot; 735 struct o2hb_disk_heartbeat_block *hb_block; 736 737 node_num = o2nm_this_node(); 738 slot = ®->hr_slots[node_num]; 739 740 hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block; 741 memset(hb_block, 0, reg->hr_block_bytes); 742 /* TODO: time stuff */ 743 cputime = ktime_get_real_seconds(); 744 if (!cputime) 745 cputime = 1; 746 747 hb_block->hb_seq = cpu_to_le64(cputime); 748 hb_block->hb_node = node_num; 749 hb_block->hb_generation = cpu_to_le64(generation); 750 hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS); 751 752 /* This step must always happen last! */ 753 hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg, 754 hb_block)); 755 756 mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n", 757 (long long)generation, 758 le32_to_cpu(hb_block->hb_cksum)); 759 } 760 761 static void o2hb_fire_callbacks(struct o2hb_callback *hbcall, 762 struct o2nm_node *node, 763 int idx) 764 { 765 struct o2hb_callback_func *f; 766 767 list_for_each_entry(f, &hbcall->list, hc_item) { 768 mlog(ML_HEARTBEAT, "calling funcs %p\n", f); 769 (f->hc_func)(node, idx, f->hc_data); 770 } 771 } 772 773 /* Will run the list in order until we process the passed event */ 774 static void o2hb_run_event_list(struct o2hb_node_event *queued_event) 775 { 776 struct o2hb_callback *hbcall; 777 struct o2hb_node_event *event; 778 779 /* Holding callback sem assures we don't alter the callback 780 * lists when doing this, and serializes ourselves with other 781 * processes wanting callbacks. */ 782 down_write(&o2hb_callback_sem); 783 784 spin_lock(&o2hb_live_lock); 785 while (!list_empty(&o2hb_node_events) 786 && !list_empty(&queued_event->hn_item)) { 787 event = list_entry(o2hb_node_events.next, 788 struct o2hb_node_event, 789 hn_item); 790 list_del_init(&event->hn_item); 791 spin_unlock(&o2hb_live_lock); 792 793 mlog(ML_HEARTBEAT, "Node %s event for %d\n", 794 event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN", 795 event->hn_node_num); 796 797 hbcall = hbcall_from_type(event->hn_event_type); 798 799 /* We should *never* have gotten on to the list with a 800 * bad type... This isn't something that we should try 801 * to recover from. */ 802 BUG_ON(IS_ERR(hbcall)); 803 804 o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num); 805 806 spin_lock(&o2hb_live_lock); 807 } 808 spin_unlock(&o2hb_live_lock); 809 810 up_write(&o2hb_callback_sem); 811 } 812 813 static void o2hb_queue_node_event(struct o2hb_node_event *event, 814 enum o2hb_callback_type type, 815 struct o2nm_node *node, 816 int node_num) 817 { 818 assert_spin_locked(&o2hb_live_lock); 819 820 BUG_ON((!node) && (type != O2HB_NODE_DOWN_CB)); 821 822 event->hn_event_type = type; 823 event->hn_node = node; 824 event->hn_node_num = node_num; 825 826 mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n", 827 type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num); 828 829 list_add_tail(&event->hn_item, &o2hb_node_events); 830 } 831 832 static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) 833 { 834 struct o2hb_node_event event = 835 { .hn_item = LIST_HEAD_INIT(event.hn_item), }; 836 struct o2nm_node *node; 837 int queued = 0; 838 839 node = o2nm_get_node_by_num(slot->ds_node_num); 840 if (!node) 841 return; 842 843 spin_lock(&o2hb_live_lock); 844 if (!list_empty(&slot->ds_live_item)) { 845 mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n", 846 slot->ds_node_num); 847 848 list_del_init(&slot->ds_live_item); 849 850 if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { 851 clear_bit(slot->ds_node_num, o2hb_live_node_bitmap); 852 853 o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node, 854 slot->ds_node_num); 855 queued = 1; 856 } 857 } 858 spin_unlock(&o2hb_live_lock); 859 860 if (queued) 861 o2hb_run_event_list(&event); 862 863 o2nm_node_put(node); 864 } 865 866 static void o2hb_set_quorum_device(struct o2hb_region *reg) 867 { 868 if (!o2hb_global_heartbeat_active()) 869 return; 870 871 /* Prevent race with o2hb_heartbeat_group_drop_item() */ 872 if (kthread_should_stop()) 873 return; 874 875 /* Tag region as quorum only after thread reaches steady state */ 876 if (atomic_read(®->hr_steady_iterations) != 0) 877 return; 878 879 spin_lock(&o2hb_live_lock); 880 881 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) 882 goto unlock; 883 884 /* 885 * A region can be added to the quorum only when it sees all 886 * live nodes heartbeat on it. In other words, the region has been 887 * added to all nodes. 888 */ 889 if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, 890 sizeof(o2hb_live_node_bitmap))) 891 goto unlock; 892 893 printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n", 894 config_item_name(®->hr_item), reg->hr_dev_name); 895 896 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); 897 898 /* 899 * If global heartbeat active, unpin all regions if the 900 * region count > CUT_OFF 901 */ 902 if (bitmap_weight(o2hb_quorum_region_bitmap, 903 O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) 904 o2hb_region_unpin(NULL); 905 unlock: 906 spin_unlock(&o2hb_live_lock); 907 } 908 909 static int o2hb_check_slot(struct o2hb_region *reg, 910 struct o2hb_disk_slot *slot) 911 { 912 int changed = 0, gen_changed = 0; 913 struct o2hb_node_event event = 914 { .hn_item = LIST_HEAD_INIT(event.hn_item), }; 915 struct o2nm_node *node; 916 struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block; 917 u64 cputime; 918 unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS; 919 unsigned int slot_dead_ms; 920 int tmp; 921 int queued = 0; 922 923 memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); 924 925 /* 926 * If a node is no longer configured but is still in the livemap, we 927 * may need to clear that bit from the livemap. 928 */ 929 node = o2nm_get_node_by_num(slot->ds_node_num); 930 if (!node) { 931 spin_lock(&o2hb_live_lock); 932 tmp = test_bit(slot->ds_node_num, o2hb_live_node_bitmap); 933 spin_unlock(&o2hb_live_lock); 934 if (!tmp) 935 return 0; 936 } 937 938 if (!o2hb_verify_crc(reg, hb_block)) { 939 /* all paths from here will drop o2hb_live_lock for 940 * us. */ 941 spin_lock(&o2hb_live_lock); 942 943 /* Don't print an error on the console in this case - 944 * a freshly formatted heartbeat area will not have a 945 * crc set on it. */ 946 if (list_empty(&slot->ds_live_item)) 947 goto out; 948 949 /* The node is live but pushed out a bad crc. We 950 * consider it a transient miss but don't populate any 951 * other values as they may be junk. */ 952 mlog(ML_ERROR, "Node %d has written a bad crc to %s\n", 953 slot->ds_node_num, reg->hr_dev_name); 954 o2hb_dump_slot(hb_block); 955 956 slot->ds_equal_samples++; 957 goto fire_callbacks; 958 } 959 960 /* we don't care if these wrap.. the state transitions below 961 * clear at the right places */ 962 cputime = le64_to_cpu(hb_block->hb_seq); 963 if (slot->ds_last_time != cputime) 964 slot->ds_changed_samples++; 965 else 966 slot->ds_equal_samples++; 967 slot->ds_last_time = cputime; 968 969 /* The node changed heartbeat generations. We assume this to 970 * mean it dropped off but came back before we timed out. We 971 * want to consider it down for the time being but don't want 972 * to lose any changed_samples state we might build up to 973 * considering it live again. */ 974 if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) { 975 gen_changed = 1; 976 slot->ds_equal_samples = 0; 977 mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx " 978 "to 0x%llx)\n", slot->ds_node_num, 979 (long long)slot->ds_last_generation, 980 (long long)le64_to_cpu(hb_block->hb_generation)); 981 } 982 983 slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation); 984 985 mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x " 986 "seq %llu last %llu changed %u equal %u\n", 987 slot->ds_node_num, (long long)slot->ds_last_generation, 988 le32_to_cpu(hb_block->hb_cksum), 989 (unsigned long long)le64_to_cpu(hb_block->hb_seq), 990 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, 991 slot->ds_equal_samples); 992 993 spin_lock(&o2hb_live_lock); 994 995 fire_callbacks: 996 /* dead nodes only come to life after some number of 997 * changes at any time during their dead time */ 998 if (list_empty(&slot->ds_live_item) && 999 slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) { 1000 mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n", 1001 slot->ds_node_num, (long long)slot->ds_last_generation); 1002 1003 set_bit(slot->ds_node_num, reg->hr_live_node_bitmap); 1004 1005 /* first on the list generates a callback */ 1006 if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { 1007 mlog(ML_HEARTBEAT, "o2hb: Add node %d to live nodes " 1008 "bitmap\n", slot->ds_node_num); 1009 set_bit(slot->ds_node_num, o2hb_live_node_bitmap); 1010 1011 o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node, 1012 slot->ds_node_num); 1013 1014 changed = 1; 1015 queued = 1; 1016 } 1017 1018 list_add_tail(&slot->ds_live_item, 1019 &o2hb_live_slots[slot->ds_node_num]); 1020 1021 slot->ds_equal_samples = 0; 1022 1023 /* We want to be sure that all nodes agree on the 1024 * number of milliseconds before a node will be 1025 * considered dead. The self-fencing timeout is 1026 * computed from this value, and a discrepancy might 1027 * result in heartbeat calling a node dead when it 1028 * hasn't self-fenced yet. */ 1029 slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms); 1030 if (slot_dead_ms && slot_dead_ms != dead_ms) { 1031 /* TODO: Perhaps we can fail the region here. */ 1032 mlog(ML_ERROR, "Node %d on device %s has a dead count " 1033 "of %u ms, but our count is %u ms.\n" 1034 "Please double check your configuration values " 1035 "for 'O2CB_HEARTBEAT_THRESHOLD'\n", 1036 slot->ds_node_num, reg->hr_dev_name, slot_dead_ms, 1037 dead_ms); 1038 } 1039 goto out; 1040 } 1041 1042 /* if the list is dead, we're done.. */ 1043 if (list_empty(&slot->ds_live_item)) 1044 goto out; 1045 1046 /* live nodes only go dead after enough consequtive missed 1047 * samples.. reset the missed counter whenever we see 1048 * activity */ 1049 if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) { 1050 mlog(ML_HEARTBEAT, "Node %d left my region\n", 1051 slot->ds_node_num); 1052 1053 clear_bit(slot->ds_node_num, reg->hr_live_node_bitmap); 1054 1055 /* last off the live_slot generates a callback */ 1056 list_del_init(&slot->ds_live_item); 1057 if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { 1058 mlog(ML_HEARTBEAT, "o2hb: Remove node %d from live " 1059 "nodes bitmap\n", slot->ds_node_num); 1060 clear_bit(slot->ds_node_num, o2hb_live_node_bitmap); 1061 1062 /* node can be null */ 1063 o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, 1064 node, slot->ds_node_num); 1065 1066 changed = 1; 1067 queued = 1; 1068 } 1069 1070 /* We don't clear this because the node is still 1071 * actually writing new blocks. */ 1072 if (!gen_changed) 1073 slot->ds_changed_samples = 0; 1074 goto out; 1075 } 1076 if (slot->ds_changed_samples) { 1077 slot->ds_changed_samples = 0; 1078 slot->ds_equal_samples = 0; 1079 } 1080 out: 1081 spin_unlock(&o2hb_live_lock); 1082 1083 if (queued) 1084 o2hb_run_event_list(&event); 1085 1086 if (node) 1087 o2nm_node_put(node); 1088 return changed; 1089 } 1090 1091 static int o2hb_highest_node(unsigned long *nodes, int numbits) 1092 { 1093 return find_last_bit(nodes, numbits); 1094 } 1095 1096 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) 1097 { 1098 int i, ret, highest_node; 1099 int membership_change = 0, own_slot_ok = 0; 1100 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1101 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1102 struct o2hb_bio_wait_ctxt write_wc; 1103 1104 ret = o2nm_configured_node_map(configured_nodes, 1105 sizeof(configured_nodes)); 1106 if (ret) { 1107 mlog_errno(ret); 1108 goto bail; 1109 } 1110 1111 /* 1112 * If a node is not configured but is in the livemap, we still need 1113 * to read the slot so as to be able to remove it from the livemap. 1114 */ 1115 o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap)); 1116 i = -1; 1117 while ((i = find_next_bit(live_node_bitmap, 1118 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1119 set_bit(i, configured_nodes); 1120 } 1121 1122 highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); 1123 if (highest_node >= O2NM_MAX_NODES) { 1124 mlog(ML_NOTICE, "o2hb: No configured nodes found!\n"); 1125 ret = -EINVAL; 1126 goto bail; 1127 } 1128 1129 /* No sense in reading the slots of nodes that don't exist 1130 * yet. Of course, if the node definitions have holes in them 1131 * then we're reading an empty slot anyway... Consider this 1132 * best-effort. */ 1133 ret = o2hb_read_slots(reg, highest_node + 1); 1134 if (ret < 0) { 1135 mlog_errno(ret); 1136 goto bail; 1137 } 1138 1139 /* With an up to date view of the slots, we can check that no 1140 * other node has been improperly configured to heartbeat in 1141 * our slot. */ 1142 own_slot_ok = o2hb_check_own_slot(reg); 1143 1144 /* fill in the proper info for our next heartbeat */ 1145 o2hb_prepare_block(reg, reg->hr_generation); 1146 1147 ret = o2hb_issue_node_write(reg, &write_wc); 1148 if (ret < 0) { 1149 mlog_errno(ret); 1150 goto bail; 1151 } 1152 1153 i = -1; 1154 while((i = find_next_bit(configured_nodes, 1155 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1156 membership_change |= o2hb_check_slot(reg, ®->hr_slots[i]); 1157 } 1158 1159 /* 1160 * We have to be sure we've advertised ourselves on disk 1161 * before we can go to steady state. This ensures that 1162 * people we find in our steady state have seen us. 1163 */ 1164 o2hb_wait_on_io(&write_wc); 1165 if (write_wc.wc_error) { 1166 /* Do not re-arm the write timeout on I/O error - we 1167 * can't be sure that the new block ever made it to 1168 * disk */ 1169 mlog(ML_ERROR, "Write error %d on device \"%s\"\n", 1170 write_wc.wc_error, reg->hr_dev_name); 1171 ret = write_wc.wc_error; 1172 goto bail; 1173 } 1174 1175 /* Skip disarming the timeout if own slot has stale/bad data */ 1176 if (own_slot_ok) { 1177 o2hb_set_quorum_device(reg); 1178 o2hb_arm_timeout(reg); 1179 reg->hr_last_timeout_start = jiffies; 1180 } 1181 1182 bail: 1183 /* let the person who launched us know when things are steady */ 1184 if (atomic_read(®->hr_steady_iterations) != 0) { 1185 if (!ret && own_slot_ok && !membership_change) { 1186 if (atomic_dec_and_test(®->hr_steady_iterations)) 1187 wake_up(&o2hb_steady_queue); 1188 } 1189 } 1190 1191 if (atomic_read(®->hr_steady_iterations) != 0) { 1192 if (atomic_dec_and_test(®->hr_unsteady_iterations)) { 1193 printk(KERN_NOTICE "o2hb: Unable to stabilize " 1194 "heartbeart on region %s (%s)\n", 1195 config_item_name(®->hr_item), 1196 reg->hr_dev_name); 1197 atomic_set(®->hr_steady_iterations, 0); 1198 reg->hr_aborted_start = 1; 1199 wake_up(&o2hb_steady_queue); 1200 ret = -EIO; 1201 } 1202 } 1203 1204 return ret; 1205 } 1206 1207 /* 1208 * we ride the region ref that the region dir holds. before the region 1209 * dir is removed and drops it ref it will wait to tear down this 1210 * thread. 1211 */ 1212 static int o2hb_thread(void *data) 1213 { 1214 int i, ret; 1215 struct o2hb_region *reg = data; 1216 struct o2hb_bio_wait_ctxt write_wc; 1217 ktime_t before_hb, after_hb; 1218 unsigned int elapsed_msec; 1219 1220 mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n"); 1221 1222 set_user_nice(current, MIN_NICE); 1223 1224 /* Pin node */ 1225 ret = o2nm_depend_this_node(); 1226 if (ret) { 1227 mlog(ML_ERROR, "Node has been deleted, ret = %d\n", ret); 1228 reg->hr_node_deleted = 1; 1229 wake_up(&o2hb_steady_queue); 1230 return 0; 1231 } 1232 1233 while (!kthread_should_stop() && 1234 !reg->hr_unclean_stop && !reg->hr_aborted_start) { 1235 /* We track the time spent inside 1236 * o2hb_do_disk_heartbeat so that we avoid more than 1237 * hr_timeout_ms between disk writes. On busy systems 1238 * this should result in a heartbeat which is less 1239 * likely to time itself out. */ 1240 before_hb = ktime_get_real(); 1241 1242 ret = o2hb_do_disk_heartbeat(reg); 1243 reg->hr_last_hb_status = ret; 1244 1245 after_hb = ktime_get_real(); 1246 1247 elapsed_msec = (unsigned int) 1248 ktime_ms_delta(after_hb, before_hb); 1249 1250 mlog(ML_HEARTBEAT, 1251 "start = %lld, end = %lld, msec = %u, ret = %d\n", 1252 before_hb, after_hb, elapsed_msec, ret); 1253 1254 if (!kthread_should_stop() && 1255 elapsed_msec < reg->hr_timeout_ms) { 1256 /* the kthread api has blocked signals for us so no 1257 * need to record the return value. */ 1258 msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); 1259 } 1260 } 1261 1262 o2hb_disarm_timeout(reg); 1263 1264 /* unclean stop is only used in very bad situation */ 1265 for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++) 1266 o2hb_shutdown_slot(®->hr_slots[i]); 1267 1268 /* Explicit down notification - avoid forcing the other nodes 1269 * to timeout on this region when we could just as easily 1270 * write a clear generation - thus indicating to them that 1271 * this node has left this region. 1272 */ 1273 if (!reg->hr_unclean_stop && !reg->hr_aborted_start) { 1274 o2hb_prepare_block(reg, 0); 1275 ret = o2hb_issue_node_write(reg, &write_wc); 1276 if (ret == 0) 1277 o2hb_wait_on_io(&write_wc); 1278 else 1279 mlog_errno(ret); 1280 } 1281 1282 /* Unpin node */ 1283 o2nm_undepend_this_node(); 1284 1285 mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n"); 1286 1287 return 0; 1288 } 1289 1290 #ifdef CONFIG_DEBUG_FS 1291 static int o2hb_debug_open(struct inode *inode, struct file *file) 1292 { 1293 struct o2hb_debug_buf *db = inode->i_private; 1294 struct o2hb_region *reg; 1295 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1296 unsigned long lts; 1297 char *buf = NULL; 1298 int i = -1; 1299 int out = 0; 1300 1301 /* max_nodes should be the largest bitmap we pass here */ 1302 BUG_ON(sizeof(map) < db->db_size); 1303 1304 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 1305 if (!buf) 1306 goto bail; 1307 1308 switch (db->db_type) { 1309 case O2HB_DB_TYPE_LIVENODES: 1310 case O2HB_DB_TYPE_LIVEREGIONS: 1311 case O2HB_DB_TYPE_QUORUMREGIONS: 1312 case O2HB_DB_TYPE_FAILEDREGIONS: 1313 spin_lock(&o2hb_live_lock); 1314 memcpy(map, db->db_data, db->db_size); 1315 spin_unlock(&o2hb_live_lock); 1316 break; 1317 1318 case O2HB_DB_TYPE_REGION_LIVENODES: 1319 spin_lock(&o2hb_live_lock); 1320 reg = (struct o2hb_region *)db->db_data; 1321 memcpy(map, reg->hr_live_node_bitmap, db->db_size); 1322 spin_unlock(&o2hb_live_lock); 1323 break; 1324 1325 case O2HB_DB_TYPE_REGION_NUMBER: 1326 reg = (struct o2hb_region *)db->db_data; 1327 out += snprintf(buf + out, PAGE_SIZE - out, "%d\n", 1328 reg->hr_region_num); 1329 goto done; 1330 1331 case O2HB_DB_TYPE_REGION_ELAPSED_TIME: 1332 reg = (struct o2hb_region *)db->db_data; 1333 lts = reg->hr_last_timeout_start; 1334 /* If 0, it has never been set before */ 1335 if (lts) 1336 lts = jiffies_to_msecs(jiffies - lts); 1337 out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts); 1338 goto done; 1339 1340 case O2HB_DB_TYPE_REGION_PINNED: 1341 reg = (struct o2hb_region *)db->db_data; 1342 out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", 1343 !!reg->hr_item_pinned); 1344 goto done; 1345 1346 default: 1347 goto done; 1348 } 1349 1350 while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len) 1351 out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); 1352 out += snprintf(buf + out, PAGE_SIZE - out, "\n"); 1353 1354 done: 1355 i_size_write(inode, out); 1356 1357 file->private_data = buf; 1358 1359 return 0; 1360 bail: 1361 return -ENOMEM; 1362 } 1363 1364 static int o2hb_debug_release(struct inode *inode, struct file *file) 1365 { 1366 kfree(file->private_data); 1367 return 0; 1368 } 1369 1370 static ssize_t o2hb_debug_read(struct file *file, char __user *buf, 1371 size_t nbytes, loff_t *ppos) 1372 { 1373 return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, 1374 i_size_read(file->f_mapping->host)); 1375 } 1376 #else 1377 static int o2hb_debug_open(struct inode *inode, struct file *file) 1378 { 1379 return 0; 1380 } 1381 static int o2hb_debug_release(struct inode *inode, struct file *file) 1382 { 1383 return 0; 1384 } 1385 static ssize_t o2hb_debug_read(struct file *file, char __user *buf, 1386 size_t nbytes, loff_t *ppos) 1387 { 1388 return 0; 1389 } 1390 #endif /* CONFIG_DEBUG_FS */ 1391 1392 static const struct file_operations o2hb_debug_fops = { 1393 .open = o2hb_debug_open, 1394 .release = o2hb_debug_release, 1395 .read = o2hb_debug_read, 1396 .llseek = generic_file_llseek, 1397 }; 1398 1399 void o2hb_exit(void) 1400 { 1401 debugfs_remove(o2hb_debug_failedregions); 1402 debugfs_remove(o2hb_debug_quorumregions); 1403 debugfs_remove(o2hb_debug_liveregions); 1404 debugfs_remove(o2hb_debug_livenodes); 1405 debugfs_remove(o2hb_debug_dir); 1406 kfree(o2hb_db_livenodes); 1407 kfree(o2hb_db_liveregions); 1408 kfree(o2hb_db_quorumregions); 1409 kfree(o2hb_db_failedregions); 1410 } 1411 1412 static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir, 1413 struct o2hb_debug_buf **db, int db_len, 1414 int type, int size, int len, void *data) 1415 { 1416 *db = kmalloc(db_len, GFP_KERNEL); 1417 if (!*db) 1418 return NULL; 1419 1420 (*db)->db_type = type; 1421 (*db)->db_size = size; 1422 (*db)->db_len = len; 1423 (*db)->db_data = data; 1424 1425 return debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db, 1426 &o2hb_debug_fops); 1427 } 1428 1429 static int o2hb_debug_init(void) 1430 { 1431 int ret = -ENOMEM; 1432 1433 o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL); 1434 if (!o2hb_debug_dir) { 1435 mlog_errno(ret); 1436 goto bail; 1437 } 1438 1439 o2hb_debug_livenodes = o2hb_debug_create(O2HB_DEBUG_LIVENODES, 1440 o2hb_debug_dir, 1441 &o2hb_db_livenodes, 1442 sizeof(*o2hb_db_livenodes), 1443 O2HB_DB_TYPE_LIVENODES, 1444 sizeof(o2hb_live_node_bitmap), 1445 O2NM_MAX_NODES, 1446 o2hb_live_node_bitmap); 1447 if (!o2hb_debug_livenodes) { 1448 mlog_errno(ret); 1449 goto bail; 1450 } 1451 1452 o2hb_debug_liveregions = o2hb_debug_create(O2HB_DEBUG_LIVEREGIONS, 1453 o2hb_debug_dir, 1454 &o2hb_db_liveregions, 1455 sizeof(*o2hb_db_liveregions), 1456 O2HB_DB_TYPE_LIVEREGIONS, 1457 sizeof(o2hb_live_region_bitmap), 1458 O2NM_MAX_REGIONS, 1459 o2hb_live_region_bitmap); 1460 if (!o2hb_debug_liveregions) { 1461 mlog_errno(ret); 1462 goto bail; 1463 } 1464 1465 o2hb_debug_quorumregions = 1466 o2hb_debug_create(O2HB_DEBUG_QUORUMREGIONS, 1467 o2hb_debug_dir, 1468 &o2hb_db_quorumregions, 1469 sizeof(*o2hb_db_quorumregions), 1470 O2HB_DB_TYPE_QUORUMREGIONS, 1471 sizeof(o2hb_quorum_region_bitmap), 1472 O2NM_MAX_REGIONS, 1473 o2hb_quorum_region_bitmap); 1474 if (!o2hb_debug_quorumregions) { 1475 mlog_errno(ret); 1476 goto bail; 1477 } 1478 1479 o2hb_debug_failedregions = 1480 o2hb_debug_create(O2HB_DEBUG_FAILEDREGIONS, 1481 o2hb_debug_dir, 1482 &o2hb_db_failedregions, 1483 sizeof(*o2hb_db_failedregions), 1484 O2HB_DB_TYPE_FAILEDREGIONS, 1485 sizeof(o2hb_failed_region_bitmap), 1486 O2NM_MAX_REGIONS, 1487 o2hb_failed_region_bitmap); 1488 if (!o2hb_debug_failedregions) { 1489 mlog_errno(ret); 1490 goto bail; 1491 } 1492 1493 ret = 0; 1494 bail: 1495 if (ret) 1496 o2hb_exit(); 1497 1498 return ret; 1499 } 1500 1501 int o2hb_init(void) 1502 { 1503 int i; 1504 1505 for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++) 1506 INIT_LIST_HEAD(&o2hb_callbacks[i].list); 1507 1508 for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++) 1509 INIT_LIST_HEAD(&o2hb_live_slots[i]); 1510 1511 INIT_LIST_HEAD(&o2hb_node_events); 1512 1513 memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap)); 1514 memset(o2hb_region_bitmap, 0, sizeof(o2hb_region_bitmap)); 1515 memset(o2hb_live_region_bitmap, 0, sizeof(o2hb_live_region_bitmap)); 1516 memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap)); 1517 memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap)); 1518 1519 o2hb_dependent_users = 0; 1520 1521 return o2hb_debug_init(); 1522 } 1523 1524 /* if we're already in a callback then we're already serialized by the sem */ 1525 static void o2hb_fill_node_map_from_callback(unsigned long *map, 1526 unsigned bytes) 1527 { 1528 BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long))); 1529 1530 memcpy(map, &o2hb_live_node_bitmap, bytes); 1531 } 1532 1533 /* 1534 * get a map of all nodes that are heartbeating in any regions 1535 */ 1536 void o2hb_fill_node_map(unsigned long *map, unsigned bytes) 1537 { 1538 /* callers want to serialize this map and callbacks so that they 1539 * can trust that they don't miss nodes coming to the party */ 1540 down_read(&o2hb_callback_sem); 1541 spin_lock(&o2hb_live_lock); 1542 o2hb_fill_node_map_from_callback(map, bytes); 1543 spin_unlock(&o2hb_live_lock); 1544 up_read(&o2hb_callback_sem); 1545 } 1546 EXPORT_SYMBOL_GPL(o2hb_fill_node_map); 1547 1548 /* 1549 * heartbeat configfs bits. The heartbeat set is a default set under 1550 * the cluster set in nodemanager.c. 1551 */ 1552 1553 static struct o2hb_region *to_o2hb_region(struct config_item *item) 1554 { 1555 return item ? container_of(item, struct o2hb_region, hr_item) : NULL; 1556 } 1557 1558 /* drop_item only drops its ref after killing the thread, nothing should 1559 * be using the region anymore. this has to clean up any state that 1560 * attributes might have built up. */ 1561 static void o2hb_region_release(struct config_item *item) 1562 { 1563 int i; 1564 struct page *page; 1565 struct o2hb_region *reg = to_o2hb_region(item); 1566 1567 mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name); 1568 1569 kfree(reg->hr_tmp_block); 1570 1571 if (reg->hr_slot_data) { 1572 for (i = 0; i < reg->hr_num_pages; i++) { 1573 page = reg->hr_slot_data[i]; 1574 if (page) 1575 __free_page(page); 1576 } 1577 kfree(reg->hr_slot_data); 1578 } 1579 1580 if (reg->hr_bdev) 1581 blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE); 1582 1583 kfree(reg->hr_slots); 1584 1585 debugfs_remove(reg->hr_debug_livenodes); 1586 debugfs_remove(reg->hr_debug_regnum); 1587 debugfs_remove(reg->hr_debug_elapsed_time); 1588 debugfs_remove(reg->hr_debug_pinned); 1589 debugfs_remove(reg->hr_debug_dir); 1590 kfree(reg->hr_db_livenodes); 1591 kfree(reg->hr_db_regnum); 1592 kfree(reg->hr_db_elapsed_time); 1593 kfree(reg->hr_db_pinned); 1594 1595 spin_lock(&o2hb_live_lock); 1596 list_del(®->hr_all_item); 1597 spin_unlock(&o2hb_live_lock); 1598 1599 o2net_unregister_handler_list(®->hr_handler_list); 1600 kfree(reg); 1601 } 1602 1603 static int o2hb_read_block_input(struct o2hb_region *reg, 1604 const char *page, 1605 unsigned long *ret_bytes, 1606 unsigned int *ret_bits) 1607 { 1608 unsigned long bytes; 1609 char *p = (char *)page; 1610 1611 bytes = simple_strtoul(p, &p, 0); 1612 if (!p || (*p && (*p != '\n'))) 1613 return -EINVAL; 1614 1615 /* Heartbeat and fs min / max block sizes are the same. */ 1616 if (bytes > 4096 || bytes < 512) 1617 return -ERANGE; 1618 if (hweight16(bytes) != 1) 1619 return -EINVAL; 1620 1621 if (ret_bytes) 1622 *ret_bytes = bytes; 1623 if (ret_bits) 1624 *ret_bits = ffs(bytes) - 1; 1625 1626 return 0; 1627 } 1628 1629 static ssize_t o2hb_region_block_bytes_show(struct config_item *item, 1630 char *page) 1631 { 1632 return sprintf(page, "%u\n", to_o2hb_region(item)->hr_block_bytes); 1633 } 1634 1635 static ssize_t o2hb_region_block_bytes_store(struct config_item *item, 1636 const char *page, 1637 size_t count) 1638 { 1639 struct o2hb_region *reg = to_o2hb_region(item); 1640 int status; 1641 unsigned long block_bytes; 1642 unsigned int block_bits; 1643 1644 if (reg->hr_bdev) 1645 return -EINVAL; 1646 1647 status = o2hb_read_block_input(reg, page, &block_bytes, 1648 &block_bits); 1649 if (status) 1650 return status; 1651 1652 reg->hr_block_bytes = (unsigned int)block_bytes; 1653 reg->hr_block_bits = block_bits; 1654 1655 return count; 1656 } 1657 1658 static ssize_t o2hb_region_start_block_show(struct config_item *item, 1659 char *page) 1660 { 1661 return sprintf(page, "%llu\n", to_o2hb_region(item)->hr_start_block); 1662 } 1663 1664 static ssize_t o2hb_region_start_block_store(struct config_item *item, 1665 const char *page, 1666 size_t count) 1667 { 1668 struct o2hb_region *reg = to_o2hb_region(item); 1669 unsigned long long tmp; 1670 char *p = (char *)page; 1671 1672 if (reg->hr_bdev) 1673 return -EINVAL; 1674 1675 tmp = simple_strtoull(p, &p, 0); 1676 if (!p || (*p && (*p != '\n'))) 1677 return -EINVAL; 1678 1679 reg->hr_start_block = tmp; 1680 1681 return count; 1682 } 1683 1684 static ssize_t o2hb_region_blocks_show(struct config_item *item, char *page) 1685 { 1686 return sprintf(page, "%d\n", to_o2hb_region(item)->hr_blocks); 1687 } 1688 1689 static ssize_t o2hb_region_blocks_store(struct config_item *item, 1690 const char *page, 1691 size_t count) 1692 { 1693 struct o2hb_region *reg = to_o2hb_region(item); 1694 unsigned long tmp; 1695 char *p = (char *)page; 1696 1697 if (reg->hr_bdev) 1698 return -EINVAL; 1699 1700 tmp = simple_strtoul(p, &p, 0); 1701 if (!p || (*p && (*p != '\n'))) 1702 return -EINVAL; 1703 1704 if (tmp > O2NM_MAX_NODES || tmp == 0) 1705 return -ERANGE; 1706 1707 reg->hr_blocks = (unsigned int)tmp; 1708 1709 return count; 1710 } 1711 1712 static ssize_t o2hb_region_dev_show(struct config_item *item, char *page) 1713 { 1714 unsigned int ret = 0; 1715 1716 if (to_o2hb_region(item)->hr_bdev) 1717 ret = sprintf(page, "%s\n", to_o2hb_region(item)->hr_dev_name); 1718 1719 return ret; 1720 } 1721 1722 static void o2hb_init_region_params(struct o2hb_region *reg) 1723 { 1724 reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits; 1725 reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; 1726 1727 mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n", 1728 reg->hr_start_block, reg->hr_blocks); 1729 mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n", 1730 reg->hr_block_bytes, reg->hr_block_bits); 1731 mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms); 1732 mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold); 1733 } 1734 1735 static int o2hb_map_slot_data(struct o2hb_region *reg) 1736 { 1737 int i, j; 1738 unsigned int last_slot; 1739 unsigned int spp = reg->hr_slots_per_page; 1740 struct page *page; 1741 char *raw; 1742 struct o2hb_disk_slot *slot; 1743 1744 reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL); 1745 if (reg->hr_tmp_block == NULL) 1746 return -ENOMEM; 1747 1748 reg->hr_slots = kcalloc(reg->hr_blocks, 1749 sizeof(struct o2hb_disk_slot), GFP_KERNEL); 1750 if (reg->hr_slots == NULL) 1751 return -ENOMEM; 1752 1753 for(i = 0; i < reg->hr_blocks; i++) { 1754 slot = ®->hr_slots[i]; 1755 slot->ds_node_num = i; 1756 INIT_LIST_HEAD(&slot->ds_live_item); 1757 slot->ds_raw_block = NULL; 1758 } 1759 1760 reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp; 1761 mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks " 1762 "at %u blocks per page\n", 1763 reg->hr_num_pages, reg->hr_blocks, spp); 1764 1765 reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *), 1766 GFP_KERNEL); 1767 if (!reg->hr_slot_data) 1768 return -ENOMEM; 1769 1770 for(i = 0; i < reg->hr_num_pages; i++) { 1771 page = alloc_page(GFP_KERNEL); 1772 if (!page) 1773 return -ENOMEM; 1774 1775 reg->hr_slot_data[i] = page; 1776 1777 last_slot = i * spp; 1778 raw = page_address(page); 1779 for (j = 0; 1780 (j < spp) && ((j + last_slot) < reg->hr_blocks); 1781 j++) { 1782 BUG_ON((j + last_slot) >= reg->hr_blocks); 1783 1784 slot = ®->hr_slots[j + last_slot]; 1785 slot->ds_raw_block = 1786 (struct o2hb_disk_heartbeat_block *) raw; 1787 1788 raw += reg->hr_block_bytes; 1789 } 1790 } 1791 1792 return 0; 1793 } 1794 1795 /* Read in all the slots available and populate the tracking 1796 * structures so that we can start with a baseline idea of what's 1797 * there. */ 1798 static int o2hb_populate_slot_data(struct o2hb_region *reg) 1799 { 1800 int ret, i; 1801 struct o2hb_disk_slot *slot; 1802 struct o2hb_disk_heartbeat_block *hb_block; 1803 1804 ret = o2hb_read_slots(reg, reg->hr_blocks); 1805 if (ret) 1806 goto out; 1807 1808 /* We only want to get an idea of the values initially in each 1809 * slot, so we do no verification - o2hb_check_slot will 1810 * actually determine if each configured slot is valid and 1811 * whether any values have changed. */ 1812 for(i = 0; i < reg->hr_blocks; i++) { 1813 slot = ®->hr_slots[i]; 1814 hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block; 1815 1816 /* Only fill the values that o2hb_check_slot uses to 1817 * determine changing slots */ 1818 slot->ds_last_time = le64_to_cpu(hb_block->hb_seq); 1819 slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation); 1820 } 1821 1822 out: 1823 return ret; 1824 } 1825 1826 /* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */ 1827 static ssize_t o2hb_region_dev_store(struct config_item *item, 1828 const char *page, 1829 size_t count) 1830 { 1831 struct o2hb_region *reg = to_o2hb_region(item); 1832 struct task_struct *hb_task; 1833 long fd; 1834 int sectsize; 1835 char *p = (char *)page; 1836 struct fd f; 1837 struct inode *inode; 1838 ssize_t ret = -EINVAL; 1839 int live_threshold; 1840 1841 if (reg->hr_bdev) 1842 goto out; 1843 1844 /* We can't heartbeat without having had our node number 1845 * configured yet. */ 1846 if (o2nm_this_node() == O2NM_MAX_NODES) 1847 goto out; 1848 1849 fd = simple_strtol(p, &p, 0); 1850 if (!p || (*p && (*p != '\n'))) 1851 goto out; 1852 1853 if (fd < 0 || fd >= INT_MAX) 1854 goto out; 1855 1856 f = fdget(fd); 1857 if (f.file == NULL) 1858 goto out; 1859 1860 if (reg->hr_blocks == 0 || reg->hr_start_block == 0 || 1861 reg->hr_block_bytes == 0) 1862 goto out2; 1863 1864 inode = igrab(f.file->f_mapping->host); 1865 if (inode == NULL) 1866 goto out2; 1867 1868 if (!S_ISBLK(inode->i_mode)) 1869 goto out3; 1870 1871 reg->hr_bdev = I_BDEV(f.file->f_mapping->host); 1872 ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL); 1873 if (ret) { 1874 reg->hr_bdev = NULL; 1875 goto out3; 1876 } 1877 inode = NULL; 1878 1879 bdevname(reg->hr_bdev, reg->hr_dev_name); 1880 1881 sectsize = bdev_logical_block_size(reg->hr_bdev); 1882 if (sectsize != reg->hr_block_bytes) { 1883 mlog(ML_ERROR, 1884 "blocksize %u incorrect for device, expected %d", 1885 reg->hr_block_bytes, sectsize); 1886 ret = -EINVAL; 1887 goto out3; 1888 } 1889 1890 o2hb_init_region_params(reg); 1891 1892 /* Generation of zero is invalid */ 1893 do { 1894 get_random_bytes(®->hr_generation, 1895 sizeof(reg->hr_generation)); 1896 } while (reg->hr_generation == 0); 1897 1898 ret = o2hb_map_slot_data(reg); 1899 if (ret) { 1900 mlog_errno(ret); 1901 goto out3; 1902 } 1903 1904 ret = o2hb_populate_slot_data(reg); 1905 if (ret) { 1906 mlog_errno(ret); 1907 goto out3; 1908 } 1909 1910 INIT_DELAYED_WORK(®->hr_write_timeout_work, o2hb_write_timeout); 1911 INIT_DELAYED_WORK(®->hr_nego_timeout_work, o2hb_nego_timeout); 1912 1913 /* 1914 * A node is considered live after it has beat LIVE_THRESHOLD 1915 * times. We're not steady until we've given them a chance 1916 * _after_ our first read. 1917 * The default threshold is bare minimum so as to limit the delay 1918 * during mounts. For global heartbeat, the threshold doubled for the 1919 * first region. 1920 */ 1921 live_threshold = O2HB_LIVE_THRESHOLD; 1922 if (o2hb_global_heartbeat_active()) { 1923 spin_lock(&o2hb_live_lock); 1924 if (bitmap_weight(o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) 1925 live_threshold <<= 1; 1926 spin_unlock(&o2hb_live_lock); 1927 } 1928 ++live_threshold; 1929 atomic_set(®->hr_steady_iterations, live_threshold); 1930 /* unsteady_iterations is triple the steady_iterations */ 1931 atomic_set(®->hr_unsteady_iterations, (live_threshold * 3)); 1932 1933 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", 1934 reg->hr_item.ci_name); 1935 if (IS_ERR(hb_task)) { 1936 ret = PTR_ERR(hb_task); 1937 mlog_errno(ret); 1938 goto out3; 1939 } 1940 1941 spin_lock(&o2hb_live_lock); 1942 reg->hr_task = hb_task; 1943 spin_unlock(&o2hb_live_lock); 1944 1945 ret = wait_event_interruptible(o2hb_steady_queue, 1946 atomic_read(®->hr_steady_iterations) == 0 || 1947 reg->hr_node_deleted); 1948 if (ret) { 1949 atomic_set(®->hr_steady_iterations, 0); 1950 reg->hr_aborted_start = 1; 1951 } 1952 1953 if (reg->hr_aborted_start) { 1954 ret = -EIO; 1955 goto out3; 1956 } 1957 1958 if (reg->hr_node_deleted) { 1959 ret = -EINVAL; 1960 goto out3; 1961 } 1962 1963 /* Ok, we were woken. Make sure it wasn't by drop_item() */ 1964 spin_lock(&o2hb_live_lock); 1965 hb_task = reg->hr_task; 1966 if (o2hb_global_heartbeat_active()) 1967 set_bit(reg->hr_region_num, o2hb_live_region_bitmap); 1968 spin_unlock(&o2hb_live_lock); 1969 1970 if (hb_task) 1971 ret = count; 1972 else 1973 ret = -EIO; 1974 1975 if (hb_task && o2hb_global_heartbeat_active()) 1976 printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n", 1977 config_item_name(®->hr_item), reg->hr_dev_name); 1978 1979 out3: 1980 iput(inode); 1981 out2: 1982 fdput(f); 1983 out: 1984 if (ret < 0) { 1985 if (reg->hr_bdev) { 1986 blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE); 1987 reg->hr_bdev = NULL; 1988 } 1989 } 1990 return ret; 1991 } 1992 1993 static ssize_t o2hb_region_pid_show(struct config_item *item, char *page) 1994 { 1995 struct o2hb_region *reg = to_o2hb_region(item); 1996 pid_t pid = 0; 1997 1998 spin_lock(&o2hb_live_lock); 1999 if (reg->hr_task) 2000 pid = task_pid_nr(reg->hr_task); 2001 spin_unlock(&o2hb_live_lock); 2002 2003 if (!pid) 2004 return 0; 2005 2006 return sprintf(page, "%u\n", pid); 2007 } 2008 2009 CONFIGFS_ATTR(o2hb_region_, block_bytes); 2010 CONFIGFS_ATTR(o2hb_region_, start_block); 2011 CONFIGFS_ATTR(o2hb_region_, blocks); 2012 CONFIGFS_ATTR(o2hb_region_, dev); 2013 CONFIGFS_ATTR_RO(o2hb_region_, pid); 2014 2015 static struct configfs_attribute *o2hb_region_attrs[] = { 2016 &o2hb_region_attr_block_bytes, 2017 &o2hb_region_attr_start_block, 2018 &o2hb_region_attr_blocks, 2019 &o2hb_region_attr_dev, 2020 &o2hb_region_attr_pid, 2021 NULL, 2022 }; 2023 2024 static struct configfs_item_operations o2hb_region_item_ops = { 2025 .release = o2hb_region_release, 2026 }; 2027 2028 static const struct config_item_type o2hb_region_type = { 2029 .ct_item_ops = &o2hb_region_item_ops, 2030 .ct_attrs = o2hb_region_attrs, 2031 .ct_owner = THIS_MODULE, 2032 }; 2033 2034 /* heartbeat set */ 2035 2036 struct o2hb_heartbeat_group { 2037 struct config_group hs_group; 2038 /* some stuff? */ 2039 }; 2040 2041 static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group) 2042 { 2043 return group ? 2044 container_of(group, struct o2hb_heartbeat_group, hs_group) 2045 : NULL; 2046 } 2047 2048 static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir) 2049 { 2050 int ret = -ENOMEM; 2051 2052 reg->hr_debug_dir = 2053 debugfs_create_dir(config_item_name(®->hr_item), dir); 2054 if (!reg->hr_debug_dir) { 2055 mlog_errno(ret); 2056 goto bail; 2057 } 2058 2059 reg->hr_debug_livenodes = 2060 o2hb_debug_create(O2HB_DEBUG_LIVENODES, 2061 reg->hr_debug_dir, 2062 &(reg->hr_db_livenodes), 2063 sizeof(*(reg->hr_db_livenodes)), 2064 O2HB_DB_TYPE_REGION_LIVENODES, 2065 sizeof(reg->hr_live_node_bitmap), 2066 O2NM_MAX_NODES, reg); 2067 if (!reg->hr_debug_livenodes) { 2068 mlog_errno(ret); 2069 goto bail; 2070 } 2071 2072 reg->hr_debug_regnum = 2073 o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, 2074 reg->hr_debug_dir, 2075 &(reg->hr_db_regnum), 2076 sizeof(*(reg->hr_db_regnum)), 2077 O2HB_DB_TYPE_REGION_NUMBER, 2078 0, O2NM_MAX_NODES, reg); 2079 if (!reg->hr_debug_regnum) { 2080 mlog_errno(ret); 2081 goto bail; 2082 } 2083 2084 reg->hr_debug_elapsed_time = 2085 o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, 2086 reg->hr_debug_dir, 2087 &(reg->hr_db_elapsed_time), 2088 sizeof(*(reg->hr_db_elapsed_time)), 2089 O2HB_DB_TYPE_REGION_ELAPSED_TIME, 2090 0, 0, reg); 2091 if (!reg->hr_debug_elapsed_time) { 2092 mlog_errno(ret); 2093 goto bail; 2094 } 2095 2096 reg->hr_debug_pinned = 2097 o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, 2098 reg->hr_debug_dir, 2099 &(reg->hr_db_pinned), 2100 sizeof(*(reg->hr_db_pinned)), 2101 O2HB_DB_TYPE_REGION_PINNED, 2102 0, 0, reg); 2103 if (!reg->hr_debug_pinned) { 2104 mlog_errno(ret); 2105 goto bail; 2106 } 2107 2108 ret = 0; 2109 bail: 2110 return ret; 2111 } 2112 2113 static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group, 2114 const char *name) 2115 { 2116 struct o2hb_region *reg = NULL; 2117 int ret; 2118 2119 reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL); 2120 if (reg == NULL) 2121 return ERR_PTR(-ENOMEM); 2122 2123 if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) { 2124 ret = -ENAMETOOLONG; 2125 goto free; 2126 } 2127 2128 spin_lock(&o2hb_live_lock); 2129 reg->hr_region_num = 0; 2130 if (o2hb_global_heartbeat_active()) { 2131 reg->hr_region_num = find_first_zero_bit(o2hb_region_bitmap, 2132 O2NM_MAX_REGIONS); 2133 if (reg->hr_region_num >= O2NM_MAX_REGIONS) { 2134 spin_unlock(&o2hb_live_lock); 2135 ret = -EFBIG; 2136 goto free; 2137 } 2138 set_bit(reg->hr_region_num, o2hb_region_bitmap); 2139 } 2140 list_add_tail(®->hr_all_item, &o2hb_all_regions); 2141 spin_unlock(&o2hb_live_lock); 2142 2143 config_item_init_type_name(®->hr_item, name, &o2hb_region_type); 2144 2145 /* this is the same way to generate msg key as dlm, for local heartbeat, 2146 * name is also the same, so make initial crc value different to avoid 2147 * message key conflict. 2148 */ 2149 reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS, 2150 name, strlen(name)); 2151 INIT_LIST_HEAD(®->hr_handler_list); 2152 ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key, 2153 sizeof(struct o2hb_nego_msg), 2154 o2hb_nego_timeout_handler, 2155 reg, NULL, ®->hr_handler_list); 2156 if (ret) 2157 goto free; 2158 2159 ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key, 2160 sizeof(struct o2hb_nego_msg), 2161 o2hb_nego_approve_handler, 2162 reg, NULL, ®->hr_handler_list); 2163 if (ret) 2164 goto unregister_handler; 2165 2166 ret = o2hb_debug_region_init(reg, o2hb_debug_dir); 2167 if (ret) { 2168 config_item_put(®->hr_item); 2169 goto unregister_handler; 2170 } 2171 2172 return ®->hr_item; 2173 2174 unregister_handler: 2175 o2net_unregister_handler_list(®->hr_handler_list); 2176 free: 2177 kfree(reg); 2178 return ERR_PTR(ret); 2179 } 2180 2181 static void o2hb_heartbeat_group_drop_item(struct config_group *group, 2182 struct config_item *item) 2183 { 2184 struct task_struct *hb_task; 2185 struct o2hb_region *reg = to_o2hb_region(item); 2186 int quorum_region = 0; 2187 2188 /* stop the thread when the user removes the region dir */ 2189 spin_lock(&o2hb_live_lock); 2190 hb_task = reg->hr_task; 2191 reg->hr_task = NULL; 2192 reg->hr_item_dropped = 1; 2193 spin_unlock(&o2hb_live_lock); 2194 2195 if (hb_task) 2196 kthread_stop(hb_task); 2197 2198 if (o2hb_global_heartbeat_active()) { 2199 spin_lock(&o2hb_live_lock); 2200 clear_bit(reg->hr_region_num, o2hb_region_bitmap); 2201 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); 2202 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) 2203 quorum_region = 1; 2204 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); 2205 spin_unlock(&o2hb_live_lock); 2206 printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n", 2207 ((atomic_read(®->hr_steady_iterations) == 0) ? 2208 "stopped" : "start aborted"), config_item_name(item), 2209 reg->hr_dev_name); 2210 } 2211 2212 /* 2213 * If we're racing a dev_write(), we need to wake them. They will 2214 * check reg->hr_task 2215 */ 2216 if (atomic_read(®->hr_steady_iterations) != 0) { 2217 reg->hr_aborted_start = 1; 2218 atomic_set(®->hr_steady_iterations, 0); 2219 wake_up(&o2hb_steady_queue); 2220 } 2221 2222 config_item_put(item); 2223 2224 if (!o2hb_global_heartbeat_active() || !quorum_region) 2225 return; 2226 2227 /* 2228 * If global heartbeat active and there are dependent users, 2229 * pin all regions if quorum region count <= CUT_OFF 2230 */ 2231 spin_lock(&o2hb_live_lock); 2232 2233 if (!o2hb_dependent_users) 2234 goto unlock; 2235 2236 if (bitmap_weight(o2hb_quorum_region_bitmap, 2237 O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) 2238 o2hb_region_pin(NULL); 2239 2240 unlock: 2241 spin_unlock(&o2hb_live_lock); 2242 } 2243 2244 static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item, 2245 char *page) 2246 { 2247 return sprintf(page, "%u\n", o2hb_dead_threshold); 2248 } 2249 2250 static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item, 2251 const char *page, size_t count) 2252 { 2253 unsigned long tmp; 2254 char *p = (char *)page; 2255 2256 tmp = simple_strtoul(p, &p, 10); 2257 if (!p || (*p && (*p != '\n'))) 2258 return -EINVAL; 2259 2260 /* this will validate ranges for us. */ 2261 o2hb_dead_threshold_set((unsigned int) tmp); 2262 2263 return count; 2264 } 2265 2266 static ssize_t o2hb_heartbeat_group_mode_show(struct config_item *item, 2267 char *page) 2268 { 2269 return sprintf(page, "%s\n", 2270 o2hb_heartbeat_mode_desc[o2hb_heartbeat_mode]); 2271 } 2272 2273 static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item, 2274 const char *page, size_t count) 2275 { 2276 unsigned int i; 2277 int ret; 2278 size_t len; 2279 2280 len = (page[count - 1] == '\n') ? count - 1 : count; 2281 if (!len) 2282 return -EINVAL; 2283 2284 for (i = 0; i < O2HB_HEARTBEAT_NUM_MODES; ++i) { 2285 if (strncasecmp(page, o2hb_heartbeat_mode_desc[i], len)) 2286 continue; 2287 2288 ret = o2hb_global_heartbeat_mode_set(i); 2289 if (!ret) 2290 printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n", 2291 o2hb_heartbeat_mode_desc[i]); 2292 return count; 2293 } 2294 2295 return -EINVAL; 2296 2297 } 2298 2299 CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold); 2300 CONFIGFS_ATTR(o2hb_heartbeat_group_, mode); 2301 2302 static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { 2303 &o2hb_heartbeat_group_attr_dead_threshold, 2304 &o2hb_heartbeat_group_attr_mode, 2305 NULL, 2306 }; 2307 2308 static struct configfs_group_operations o2hb_heartbeat_group_group_ops = { 2309 .make_item = o2hb_heartbeat_group_make_item, 2310 .drop_item = o2hb_heartbeat_group_drop_item, 2311 }; 2312 2313 static const struct config_item_type o2hb_heartbeat_group_type = { 2314 .ct_group_ops = &o2hb_heartbeat_group_group_ops, 2315 .ct_attrs = o2hb_heartbeat_group_attrs, 2316 .ct_owner = THIS_MODULE, 2317 }; 2318 2319 /* this is just here to avoid touching group in heartbeat.h which the 2320 * entire damn world #includes */ 2321 struct config_group *o2hb_alloc_hb_set(void) 2322 { 2323 struct o2hb_heartbeat_group *hs = NULL; 2324 struct config_group *ret = NULL; 2325 2326 hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL); 2327 if (hs == NULL) 2328 goto out; 2329 2330 config_group_init_type_name(&hs->hs_group, "heartbeat", 2331 &o2hb_heartbeat_group_type); 2332 2333 ret = &hs->hs_group; 2334 out: 2335 if (ret == NULL) 2336 kfree(hs); 2337 return ret; 2338 } 2339 2340 void o2hb_free_hb_set(struct config_group *group) 2341 { 2342 struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group); 2343 kfree(hs); 2344 } 2345 2346 /* hb callback registration and issuing */ 2347 2348 static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type) 2349 { 2350 if (type == O2HB_NUM_CB) 2351 return ERR_PTR(-EINVAL); 2352 2353 return &o2hb_callbacks[type]; 2354 } 2355 2356 void o2hb_setup_callback(struct o2hb_callback_func *hc, 2357 enum o2hb_callback_type type, 2358 o2hb_cb_func *func, 2359 void *data, 2360 int priority) 2361 { 2362 INIT_LIST_HEAD(&hc->hc_item); 2363 hc->hc_func = func; 2364 hc->hc_data = data; 2365 hc->hc_priority = priority; 2366 hc->hc_type = type; 2367 hc->hc_magic = O2HB_CB_MAGIC; 2368 } 2369 EXPORT_SYMBOL_GPL(o2hb_setup_callback); 2370 2371 /* 2372 * In local heartbeat mode, region_uuid passed matches the dlm domain name. 2373 * In global heartbeat mode, region_uuid passed is NULL. 2374 * 2375 * In local, we only pin the matching region. In global we pin all the active 2376 * regions. 2377 */ 2378 static int o2hb_region_pin(const char *region_uuid) 2379 { 2380 int ret = 0, found = 0; 2381 struct o2hb_region *reg; 2382 char *uuid; 2383 2384 assert_spin_locked(&o2hb_live_lock); 2385 2386 list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { 2387 if (reg->hr_item_dropped) 2388 continue; 2389 2390 uuid = config_item_name(®->hr_item); 2391 2392 /* local heartbeat */ 2393 if (region_uuid) { 2394 if (strcmp(region_uuid, uuid)) 2395 continue; 2396 found = 1; 2397 } 2398 2399 if (reg->hr_item_pinned || reg->hr_item_dropped) 2400 goto skip_pin; 2401 2402 /* Ignore ENOENT only for local hb (userdlm domain) */ 2403 ret = o2nm_depend_item(®->hr_item); 2404 if (!ret) { 2405 mlog(ML_CLUSTER, "Pin region %s\n", uuid); 2406 reg->hr_item_pinned = 1; 2407 } else { 2408 if (ret == -ENOENT && found) 2409 ret = 0; 2410 else { 2411 mlog(ML_ERROR, "Pin region %s fails with %d\n", 2412 uuid, ret); 2413 break; 2414 } 2415 } 2416 skip_pin: 2417 if (found) 2418 break; 2419 } 2420 2421 return ret; 2422 } 2423 2424 /* 2425 * In local heartbeat mode, region_uuid passed matches the dlm domain name. 2426 * In global heartbeat mode, region_uuid passed is NULL. 2427 * 2428 * In local, we only unpin the matching region. In global we unpin all the 2429 * active regions. 2430 */ 2431 static void o2hb_region_unpin(const char *region_uuid) 2432 { 2433 struct o2hb_region *reg; 2434 char *uuid; 2435 int found = 0; 2436 2437 assert_spin_locked(&o2hb_live_lock); 2438 2439 list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { 2440 if (reg->hr_item_dropped) 2441 continue; 2442 2443 uuid = config_item_name(®->hr_item); 2444 if (region_uuid) { 2445 if (strcmp(region_uuid, uuid)) 2446 continue; 2447 found = 1; 2448 } 2449 2450 if (reg->hr_item_pinned) { 2451 mlog(ML_CLUSTER, "Unpin region %s\n", uuid); 2452 o2nm_undepend_item(®->hr_item); 2453 reg->hr_item_pinned = 0; 2454 } 2455 if (found) 2456 break; 2457 } 2458 } 2459 2460 static int o2hb_region_inc_user(const char *region_uuid) 2461 { 2462 int ret = 0; 2463 2464 spin_lock(&o2hb_live_lock); 2465 2466 /* local heartbeat */ 2467 if (!o2hb_global_heartbeat_active()) { 2468 ret = o2hb_region_pin(region_uuid); 2469 goto unlock; 2470 } 2471 2472 /* 2473 * if global heartbeat active and this is the first dependent user, 2474 * pin all regions if quorum region count <= CUT_OFF 2475 */ 2476 o2hb_dependent_users++; 2477 if (o2hb_dependent_users > 1) 2478 goto unlock; 2479 2480 if (bitmap_weight(o2hb_quorum_region_bitmap, 2481 O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) 2482 ret = o2hb_region_pin(NULL); 2483 2484 unlock: 2485 spin_unlock(&o2hb_live_lock); 2486 return ret; 2487 } 2488 2489 void o2hb_region_dec_user(const char *region_uuid) 2490 { 2491 spin_lock(&o2hb_live_lock); 2492 2493 /* local heartbeat */ 2494 if (!o2hb_global_heartbeat_active()) { 2495 o2hb_region_unpin(region_uuid); 2496 goto unlock; 2497 } 2498 2499 /* 2500 * if global heartbeat active and there are no dependent users, 2501 * unpin all quorum regions 2502 */ 2503 o2hb_dependent_users--; 2504 if (!o2hb_dependent_users) 2505 o2hb_region_unpin(NULL); 2506 2507 unlock: 2508 spin_unlock(&o2hb_live_lock); 2509 } 2510 2511 int o2hb_register_callback(const char *region_uuid, 2512 struct o2hb_callback_func *hc) 2513 { 2514 struct o2hb_callback_func *f; 2515 struct o2hb_callback *hbcall; 2516 int ret; 2517 2518 BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); 2519 BUG_ON(!list_empty(&hc->hc_item)); 2520 2521 hbcall = hbcall_from_type(hc->hc_type); 2522 if (IS_ERR(hbcall)) { 2523 ret = PTR_ERR(hbcall); 2524 goto out; 2525 } 2526 2527 if (region_uuid) { 2528 ret = o2hb_region_inc_user(region_uuid); 2529 if (ret) { 2530 mlog_errno(ret); 2531 goto out; 2532 } 2533 } 2534 2535 down_write(&o2hb_callback_sem); 2536 2537 list_for_each_entry(f, &hbcall->list, hc_item) { 2538 if (hc->hc_priority < f->hc_priority) { 2539 list_add_tail(&hc->hc_item, &f->hc_item); 2540 break; 2541 } 2542 } 2543 if (list_empty(&hc->hc_item)) 2544 list_add_tail(&hc->hc_item, &hbcall->list); 2545 2546 up_write(&o2hb_callback_sem); 2547 ret = 0; 2548 out: 2549 mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n", 2550 ret, __builtin_return_address(0), hc); 2551 return ret; 2552 } 2553 EXPORT_SYMBOL_GPL(o2hb_register_callback); 2554 2555 void o2hb_unregister_callback(const char *region_uuid, 2556 struct o2hb_callback_func *hc) 2557 { 2558 BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); 2559 2560 mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n", 2561 __builtin_return_address(0), hc); 2562 2563 /* XXX Can this happen _with_ a region reference? */ 2564 if (list_empty(&hc->hc_item)) 2565 return; 2566 2567 if (region_uuid) 2568 o2hb_region_dec_user(region_uuid); 2569 2570 down_write(&o2hb_callback_sem); 2571 2572 list_del_init(&hc->hc_item); 2573 2574 up_write(&o2hb_callback_sem); 2575 } 2576 EXPORT_SYMBOL_GPL(o2hb_unregister_callback); 2577 2578 int o2hb_check_node_heartbeating_no_sem(u8 node_num) 2579 { 2580 unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2581 2582 spin_lock(&o2hb_live_lock); 2583 o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map)); 2584 spin_unlock(&o2hb_live_lock); 2585 if (!test_bit(node_num, testing_map)) { 2586 mlog(ML_HEARTBEAT, 2587 "node (%u) does not have heartbeating enabled.\n", 2588 node_num); 2589 return 0; 2590 } 2591 2592 return 1; 2593 } 2594 EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_no_sem); 2595 2596 int o2hb_check_node_heartbeating_from_callback(u8 node_num) 2597 { 2598 unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2599 2600 o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map)); 2601 if (!test_bit(node_num, testing_map)) { 2602 mlog(ML_HEARTBEAT, 2603 "node (%u) does not have heartbeating enabled.\n", 2604 node_num); 2605 return 0; 2606 } 2607 2608 return 1; 2609 } 2610 EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback); 2611 2612 /* 2613 * this is just a hack until we get the plumbing which flips file systems 2614 * read only and drops the hb ref instead of killing the node dead. 2615 */ 2616 void o2hb_stop_all_regions(void) 2617 { 2618 struct o2hb_region *reg; 2619 2620 mlog(ML_ERROR, "stopping heartbeat on all active regions.\n"); 2621 2622 spin_lock(&o2hb_live_lock); 2623 2624 list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) 2625 reg->hr_unclean_stop = 1; 2626 2627 spin_unlock(&o2hb_live_lock); 2628 } 2629 EXPORT_SYMBOL_GPL(o2hb_stop_all_regions); 2630 2631 int o2hb_get_all_regions(char *region_uuids, u8 max_regions) 2632 { 2633 struct o2hb_region *reg; 2634 int numregs = 0; 2635 char *p; 2636 2637 spin_lock(&o2hb_live_lock); 2638 2639 p = region_uuids; 2640 list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { 2641 if (reg->hr_item_dropped) 2642 continue; 2643 2644 mlog(0, "Region: %s\n", config_item_name(®->hr_item)); 2645 if (numregs < max_regions) { 2646 memcpy(p, config_item_name(®->hr_item), 2647 O2HB_MAX_REGION_NAME_LEN); 2648 p += O2HB_MAX_REGION_NAME_LEN; 2649 } 2650 numregs++; 2651 } 2652 2653 spin_unlock(&o2hb_live_lock); 2654 2655 return numregs; 2656 } 2657 EXPORT_SYMBOL_GPL(o2hb_get_all_regions); 2658 2659 int o2hb_global_heartbeat_active(void) 2660 { 2661 return (o2hb_heartbeat_mode == O2HB_HEARTBEAT_GLOBAL); 2662 } 2663 EXPORT_SYMBOL(o2hb_global_heartbeat_active); 2664