Lines matching "build", "system", and "flaky" (full-word search) in fs/ocfs2/cluster/heartbeat.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
52 * - o2hb_region_bitmap allows us to limit the region number to max region.
53 * - o2hb_live_region_bitmap tracks live regions (seen steady iterations).
54 * - o2hb_quorum_region_bitmap tracks live regions that have seen all nodes
56 * - o2hb_failed_region_bitmap tracks the regions that have seen io timeouts.
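The four bitmaps described in the comment above track region state at different stages. A minimal sketch of how such per-region bitmaps are typically declared and updated, assuming a BITS_TO_LONGS-style layout and an O2NM_MAX_REGIONS limit (both assumptions for illustration, not copied from the file):

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>

#define O2NM_MAX_REGIONS 32		/* assumed value for illustration */

static unsigned long o2hb_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_live_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_quorum_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];

/* Record an io timeout: only regions already in the quorum set are
 * counted as failed, mirroring lines 294-295 further down. */
static void hb_region_io_timed_out(unsigned int region_num)
{
	if (test_bit(region_num, o2hb_quorum_region_bitmap))
		set_bit(region_num, o2hb_failed_region_bitmap);
}
```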
134 * region uuid. This is true for domains created for the file system but not
138 * works for both file system and userdlm domains.
160 int ret = -1; in o2hb_global_heartbeat_mode_set()
289 "milliseconds\n", reg->hr_bdev, in o2hb_write_timeout()
290 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); in o2hb_write_timeout()
294 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) in o2hb_write_timeout()
295 set_bit(reg->hr_region_num, o2hb_failed_region_bitmap); in o2hb_write_timeout()
319 if (atomic_read(&reg->hr_steady_iterations) != 0) in o2hb_arm_timeout()
327 clear_bit(reg->hr_region_num, o2hb_failed_region_bitmap); in o2hb_arm_timeout()
330 cancel_delayed_work(&reg->hr_write_timeout_work); in o2hb_arm_timeout()
331 schedule_delayed_work(&reg->hr_write_timeout_work, in o2hb_arm_timeout()
334 cancel_delayed_work(&reg->hr_nego_timeout_work); in o2hb_arm_timeout()
336 schedule_delayed_work(&reg->hr_nego_timeout_work, in o2hb_arm_timeout()
338 bitmap_zero(reg->hr_nego_node_bitmap, O2NM_MAX_NODES); in o2hb_arm_timeout()
343 cancel_delayed_work_sync(&reg->hr_write_timeout_work); in o2hb_disarm_timeout()
344 cancel_delayed_work_sync(&reg->hr_nego_timeout_work); in o2hb_disarm_timeout()
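The o2hb_arm_timeout()/o2hb_disarm_timeout() lines above show the standard delayed-work re-arm pattern: cancel any pending instance, queue a fresh one, and use the _sync variant on the disarm path so an in-flight handler finishes before teardown. A hedged sketch of the same pattern with illustrative names (not the file's own fields):

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct hb_region {
	struct delayed_work	write_timeout_work;
	unsigned int		write_timeout_ms;
};

static void hb_write_timeout(struct work_struct *work)
{
	/* handle a write that has taken too long ... */
}

static void hb_region_init(struct hb_region *reg)
{
	INIT_DELAYED_WORK(&reg->write_timeout_work, hb_write_timeout);
	reg->write_timeout_ms = 2000;	/* illustrative timeout */
}

static void hb_arm_timeout(struct hb_region *reg)
{
	/* re-arm: drop any pending instance, then queue a fresh one */
	cancel_delayed_work(&reg->write_timeout_work);
	schedule_delayed_work(&reg->write_timeout_work,
			      msecs_to_jiffies(reg->write_timeout_ms));
}

static void hb_disarm_timeout(struct hb_region *reg)
{
	/* _sync waits for a running handler before returning */
	cancel_delayed_work_sync(&reg->write_timeout_work);
}
```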
357 if (ret == -EAGAIN || ret == -ENOMEM) { in o2hb_send_nego_msg()
375 if (reg->hr_last_hb_status) in o2hb_nego_timeout()
383 if (!test_bit(master_node, reg->hr_nego_node_bitmap)) { in o2hb_nego_timeout()
386 config_item_name(&reg->hr_item), reg->hr_bdev); in o2hb_nego_timeout()
387 set_bit(master_node, reg->hr_nego_node_bitmap); in o2hb_nego_timeout()
389 if (!bitmap_equal(reg->hr_nego_node_bitmap, live_node_bitmap, in o2hb_nego_timeout()
394 schedule_delayed_work(&reg->hr_nego_timeout_work, in o2hb_nego_timeout()
401 config_item_name(&reg->hr_item), reg->hr_bdev); in o2hb_nego_timeout()
405 i = -1; in o2hb_nego_timeout()
412 ret = o2hb_send_nego_msg(reg->hr_key, in o2hb_nego_timeout()
421 o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item), in o2hb_nego_timeout()
422 reg->hr_bdev, master_node); in o2hb_nego_timeout()
423 ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG, in o2hb_nego_timeout()
437 nego_msg = (struct o2hb_nego_msg *)msg->buf; in o2hb_nego_timeout_handler()
439 nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_bdev); in o2hb_nego_timeout_handler()
440 if (nego_msg->node_num < O2NM_MAX_NODES) in o2hb_nego_timeout_handler()
441 set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap); in o2hb_nego_timeout_handler()
454 config_item_name(&reg->hr_item), reg->hr_bdev); in o2hb_nego_approve_handler()
461 atomic_set(&wc->wc_num_reqs, 1); in o2hb_bio_wait_init()
462 init_completion(&wc->wc_io_complete); in o2hb_bio_wait_init()
463 wc->wc_error = 0; in o2hb_bio_wait_init()
472 while(num--) { in o2hb_bio_wait_dec()
473 if (atomic_dec_and_test(&wc->wc_num_reqs)) { in o2hb_bio_wait_dec()
475 complete(&wc->wc_io_complete); in o2hb_bio_wait_dec()
483 wait_for_completion(&wc->wc_io_complete); in o2hb_wait_on_io()
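Lines 461-483 show a common "count of outstanding requests plus completion" idiom: the counter starts at 1 so the submitter holds its own reference while bios are still being allocated, and the last decrement fires the completion. A simplified sketch (names are illustrative, not the file's own):

```c
#include <linux/atomic.h>
#include <linux/completion.h>

struct io_wait_ctxt {
	atomic_t		num_reqs;
	struct completion	io_complete;
	int			error;
};

static void io_wait_init(struct io_wait_ctxt *wc)
{
	atomic_set(&wc->num_reqs, 1);	/* submitter's own reference */
	init_completion(&wc->io_complete);
	wc->error = 0;
}

static void io_wait_dec(struct io_wait_ctxt *wc, unsigned int num)
{
	while (num--) {
		if (atomic_dec_and_test(&wc->num_reqs))
			complete(&wc->io_complete);
	}
}

static void io_wait(struct io_wait_ctxt *wc)
{
	io_wait_dec(wc, 1);		/* drop the submitter's reference */
	wait_for_completion(&wc->io_complete);
}
```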
488 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; in o2hb_bio_end_io()
490 if (bio->bi_status) { in o2hb_bio_end_io()
491 mlog(ML_ERROR, "IO Error %d\n", bio->bi_status); in o2hb_bio_end_io()
492 wc->wc_error = blk_status_to_errno(bio->bi_status); in o2hb_bio_end_io()
508 unsigned int bits = reg->hr_block_bits; in o2hb_setup_one_bio()
509 unsigned int spp = reg->hr_slots_per_page; in o2hb_setup_one_bio()
516 * nicest if we could pre-allocate these bios and avoid this in o2hb_setup_one_bio()
518 bio = bio_alloc(reg->hr_bdev, 16, opf, GFP_ATOMIC); in o2hb_setup_one_bio()
521 bio = ERR_PTR(-ENOMEM); in o2hb_setup_one_bio()
526 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); in o2hb_setup_one_bio()
527 bio->bi_private = wc; in o2hb_setup_one_bio()
528 bio->bi_end_io = o2hb_bio_end_io; in o2hb_setup_one_bio()
533 page = reg->hr_slot_data[current_page]; in o2hb_setup_one_bio()
535 vec_len = min(PAGE_SIZE - vec_start, in o2hb_setup_one_bio()
536 (max_slots-cs) * (PAGE_SIZE/spp) ); in o2hb_setup_one_bio()
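Line 526 converts a heartbeat block number into a 512-byte sector number by shifting left by (block_bits - 9). A small user-space illustration of that arithmetic, with made-up numbers:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long start_block = 17, slot = 3;
	unsigned int block_bits = 12;		/* 4096-byte blocks */
	unsigned long long sector = (start_block + slot) << (block_bits - 9);

	/* (17 + 3) * 8 = 160: each 4 KiB block covers eight 512-byte sectors */
	printf("sector = %llu\n", sector);
	return 0;
}
```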
606 atomic_inc(&write_wc->wc_num_reqs); in o2hb_issue_node_write()
623 old_cksum = hb_block->hb_cksum; in o2hb_compute_block_crc_le()
624 hb_block->hb_cksum = 0; in o2hb_compute_block_crc_le()
626 ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes); in o2hb_compute_block_crc_le()
628 hb_block->hb_cksum = old_cksum; in o2hb_compute_block_crc_le()
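Lines 623-628 compute the block CRC with the cksum field temporarily zeroed, so the stored checksum never covers itself, then restore it. A sketch of that convention; the struct layout here is an assumption, not the real o2hb_disk_heartbeat_block:

```c
#include <linux/crc32.h>
#include <linux/types.h>

struct hb_disk_block {
	__le64	seq;
	__u8	node;
	__le32	cksum;
	/* ... remainder of the on-disk block ... */
};

static u32 hb_block_crc_le(struct hb_disk_block *blk, unsigned int bytes)
{
	__le32 old = blk->cksum;
	u32 crc;

	blk->cksum = 0;				/* exclude the field itself */
	crc = crc32_le(0, (unsigned char *)blk, bytes);
	blk->cksum = old;

	return crc;
}
```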
637 (long long)le64_to_cpu(hb_block->hb_seq), in o2hb_dump_slot()
638 hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum), in o2hb_dump_slot()
639 (long long)le64_to_cpu(hb_block->hb_generation)); in o2hb_dump_slot()
647 read = le32_to_cpu(hb_block->hb_cksum); in o2hb_verify_crc()
657 * flaky device that is losing writes, etc.
666 slot = &reg->hr_slots[o2nm_this_node()]; in o2hb_check_own_slot()
668 if (!slot->ds_last_time) in o2hb_check_own_slot()
671 hb_block = slot->ds_raw_block; in o2hb_check_own_slot()
672 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && in o2hb_check_own_slot()
673 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && in o2hb_check_own_slot()
674 hb_block->hb_node == slot->ds_node_num) in o2hb_check_own_slot()
681 if (hb_block->hb_node != slot->ds_node_num) in o2hb_check_own_slot()
683 else if (le64_to_cpu(hb_block->hb_generation) != in o2hb_check_own_slot()
684 slot->ds_last_generation) in o2hb_check_own_slot()
690 "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_bdev, in o2hb_check_own_slot()
691 slot->ds_node_num, (unsigned long long)slot->ds_last_generation, in o2hb_check_own_slot()
692 (unsigned long long)slot->ds_last_time, hb_block->hb_node, in o2hb_check_own_slot()
693 (unsigned long long)le64_to_cpu(hb_block->hb_generation), in o2hb_check_own_slot()
694 (unsigned long long)le64_to_cpu(hb_block->hb_seq)); in o2hb_check_own_slot()
708 slot = &reg->hr_slots[node_num]; in o2hb_prepare_block()
710 hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block; in o2hb_prepare_block()
711 memset(hb_block, 0, reg->hr_block_bytes); in o2hb_prepare_block()
717 hb_block->hb_seq = cpu_to_le64(cputime); in o2hb_prepare_block()
718 hb_block->hb_node = node_num; in o2hb_prepare_block()
719 hb_block->hb_generation = cpu_to_le64(generation); in o2hb_prepare_block()
720 hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS); in o2hb_prepare_block()
723 hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg, in o2hb_prepare_block()
728 le32_to_cpu(hb_block->hb_cksum)); in o2hb_prepare_block()
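Line 720 advertises the writer's own dead window inside the block: hb_dead_ms is the dead threshold (in heartbeat iterations) multiplied by the per-iteration region timeout. Assuming the commonly used defaults of a 2000 ms region timeout and a dead threshold of 31 iterations (assumptions here, not read from this listing), that works out to 31 * 2000 = 62000 ms, which other nodes can honour when deciding how long to wait before treating this writer as dead.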
737 list_for_each_entry(f, &hbcall->list, hc_item) { in o2hb_fire_callbacks()
739 (f->hc_func)(node, idx, f->hc_data); in o2hb_fire_callbacks()
756 && !list_empty(&queued_event->hn_item)) { in o2hb_run_event_list()
760 list_del_init(&event->hn_item); in o2hb_run_event_list()
764 event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN", in o2hb_run_event_list()
765 event->hn_node_num); in o2hb_run_event_list()
767 hbcall = hbcall_from_type(event->hn_event_type); in o2hb_run_event_list()
774 o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num); in o2hb_run_event_list()
792 event->hn_event_type = type; in o2hb_queue_node_event()
793 event->hn_node = node; in o2hb_queue_node_event()
794 event->hn_node_num = node_num; in o2hb_queue_node_event()
799 list_add_tail(&event->hn_item, &o2hb_node_events); in o2hb_queue_node_event()
809 node = o2nm_get_node_by_num(slot->ds_node_num); in o2hb_shutdown_slot()
814 if (!list_empty(&slot->ds_live_item)) { in o2hb_shutdown_slot()
816 slot->ds_node_num); in o2hb_shutdown_slot()
818 list_del_init(&slot->ds_live_item); in o2hb_shutdown_slot()
820 if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { in o2hb_shutdown_slot()
821 clear_bit(slot->ds_node_num, o2hb_live_node_bitmap); in o2hb_shutdown_slot()
824 slot->ds_node_num); in o2hb_shutdown_slot()
846 if (atomic_read(&reg->hr_steady_iterations) != 0) in o2hb_set_quorum_device()
851 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) in o2hb_set_quorum_device()
859 if (!bitmap_equal(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, in o2hb_set_quorum_device()
864 config_item_name(&reg->hr_item), reg->hr_bdev); in o2hb_set_quorum_device()
866 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); in o2hb_set_quorum_device()
886 struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block; in o2hb_check_slot()
893 memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); in o2hb_check_slot()
899 node = o2nm_get_node_by_num(slot->ds_node_num); in o2hb_check_slot()
902 tmp = test_bit(slot->ds_node_num, o2hb_live_node_bitmap); in o2hb_check_slot()
913 /* Don't print an error on the console in this case - in o2hb_check_slot()
916 if (list_empty(&slot->ds_live_item)) in o2hb_check_slot()
923 slot->ds_node_num, reg->hr_bdev); in o2hb_check_slot()
926 slot->ds_equal_samples++; in o2hb_check_slot()
932 cputime = le64_to_cpu(hb_block->hb_seq); in o2hb_check_slot()
933 if (slot->ds_last_time != cputime) in o2hb_check_slot()
934 slot->ds_changed_samples++; in o2hb_check_slot()
936 slot->ds_equal_samples++; in o2hb_check_slot()
937 slot->ds_last_time = cputime; in o2hb_check_slot()
942 * to lose any changed_samples state we might build up to in o2hb_check_slot()
944 if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) { in o2hb_check_slot()
946 slot->ds_equal_samples = 0; in o2hb_check_slot()
948 "to 0x%llx)\n", slot->ds_node_num, in o2hb_check_slot()
949 (long long)slot->ds_last_generation, in o2hb_check_slot()
950 (long long)le64_to_cpu(hb_block->hb_generation)); in o2hb_check_slot()
953 slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation); in o2hb_check_slot()
957 slot->ds_node_num, (long long)slot->ds_last_generation, in o2hb_check_slot()
958 le32_to_cpu(hb_block->hb_cksum), in o2hb_check_slot()
959 (unsigned long long)le64_to_cpu(hb_block->hb_seq), in o2hb_check_slot()
960 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, in o2hb_check_slot()
961 slot->ds_equal_samples); in o2hb_check_slot()
968 if (list_empty(&slot->ds_live_item) && in o2hb_check_slot()
969 slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) { in o2hb_check_slot()
971 slot->ds_node_num, (long long)slot->ds_last_generation); in o2hb_check_slot()
973 set_bit(slot->ds_node_num, reg->hr_live_node_bitmap); in o2hb_check_slot()
976 if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { in o2hb_check_slot()
978 "bitmap\n", slot->ds_node_num); in o2hb_check_slot()
979 set_bit(slot->ds_node_num, o2hb_live_node_bitmap); in o2hb_check_slot()
982 slot->ds_node_num); in o2hb_check_slot()
988 list_add_tail(&slot->ds_live_item, in o2hb_check_slot()
989 &o2hb_live_slots[slot->ds_node_num]); in o2hb_check_slot()
991 slot->ds_equal_samples = 0; in o2hb_check_slot()
995 * considered dead. The self-fencing timeout is in o2hb_check_slot()
998 * hasn't self-fenced yet. */ in o2hb_check_slot()
999 slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms); in o2hb_check_slot()
1006 slot->ds_node_num, reg->hr_bdev, slot_dead_ms, in o2hb_check_slot()
1013 if (list_empty(&slot->ds_live_item)) in o2hb_check_slot()
1019 if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) { in o2hb_check_slot()
1021 slot->ds_node_num); in o2hb_check_slot()
1023 clear_bit(slot->ds_node_num, reg->hr_live_node_bitmap); in o2hb_check_slot()
1026 list_del_init(&slot->ds_live_item); in o2hb_check_slot()
1027 if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { in o2hb_check_slot()
1029 "nodes bitmap\n", slot->ds_node_num); in o2hb_check_slot()
1030 clear_bit(slot->ds_node_num, o2hb_live_node_bitmap); in o2hb_check_slot()
1034 node, slot->ds_node_num); in o2hb_check_slot()
1043 slot->ds_changed_samples = 0; in o2hb_check_slot()
1046 if (slot->ds_changed_samples) { in o2hb_check_slot()
1047 slot->ds_changed_samples = 0; in o2hb_check_slot()
1048 slot->ds_equal_samples = 0; in o2hb_check_slot()
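The o2hb_check_slot() lines above implement the per-slot liveness decision: a slot whose sequence keeps advancing accumulates changed_samples and goes live once it crosses O2HB_LIVE_THRESHOLD, while a slot whose sequence stops moving accumulates equal_samples and goes dead at the dead threshold, with a generation change forcing the dead path immediately. A deliberately simplified sketch of that state machine, with assumed constant values and illustrative names:

```c
#define HB_LIVE_THRESHOLD	2	/* assumed value */
#define HB_DEAD_THRESHOLD	31	/* assumed default */

struct hb_slot_state {
	int live;
	unsigned int changed_samples;	/* reads where hb_seq advanced */
	unsigned int equal_samples;	/* reads where hb_seq stood still */
};

/* One read of the slot: seq_changed says whether hb_seq moved,
 * gen_changed whether hb_generation changed since the last read. */
static void hb_account_sample(struct hb_slot_state *s,
			      int seq_changed, int gen_changed)
{
	if (seq_changed) {
		s->changed_samples++;
		s->equal_samples = 0;
	} else {
		s->equal_samples++;
	}

	if (!s->live && s->changed_samples >= HB_LIVE_THRESHOLD) {
		s->live = 1;			/* node came up */
	} else if (s->live &&
		   (gen_changed || s->equal_samples >= HB_DEAD_THRESHOLD)) {
		s->live = 0;			/* node went down */
		s->changed_samples = 0;
	}
}
```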
1091 i = -1; in o2hb_do_disk_heartbeat()
1101 ret = -EINVAL; in o2hb_do_disk_heartbeat()
1108 * best-effort. */ in o2hb_do_disk_heartbeat()
1121 o2hb_prepare_block(reg, reg->hr_generation); in o2hb_do_disk_heartbeat()
1129 i = -1; in o2hb_do_disk_heartbeat()
1132 membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]); in o2hb_do_disk_heartbeat()
1142 /* Do not re-arm the write timeout on I/O error - we in o2hb_do_disk_heartbeat()
1146 write_wc.wc_error, reg->hr_bdev); in o2hb_do_disk_heartbeat()
1155 reg->hr_last_timeout_start = jiffies; in o2hb_do_disk_heartbeat()
1160 if (atomic_read(&reg->hr_steady_iterations) != 0) { in o2hb_do_disk_heartbeat()
1162 if (atomic_dec_and_test(&reg->hr_steady_iterations)) in o2hb_do_disk_heartbeat()
1167 if (atomic_read(&reg->hr_steady_iterations) != 0) { in o2hb_do_disk_heartbeat()
1168 if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) { in o2hb_do_disk_heartbeat()
1171 config_item_name(&reg->hr_item), in o2hb_do_disk_heartbeat()
1172 reg->hr_bdev); in o2hb_do_disk_heartbeat()
1173 atomic_set(&reg->hr_steady_iterations, 0); in o2hb_do_disk_heartbeat()
1174 reg->hr_aborted_start = 1; in o2hb_do_disk_heartbeat()
1176 ret = -EIO; in o2hb_do_disk_heartbeat()
1204 reg->hr_node_deleted = 1; in o2hb_thread()
1210 !reg->hr_unclean_stop && !reg->hr_aborted_start) { in o2hb_thread()
1219 reg->hr_last_hb_status = ret; in o2hb_thread()
1231 elapsed_msec < reg->hr_timeout_ms) { in o2hb_thread()
1234 msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); in o2hb_thread()
1241 for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++) in o2hb_thread()
1242 o2hb_shutdown_slot(&reg->hr_slots[i]); in o2hb_thread()
1244 /* Explicit down notification - avoid forcing the other nodes in o2hb_thread()
1246 * write a clear generation - thus indicating to them that in o2hb_thread()
1249 if (!reg->hr_unclean_stop && !reg->hr_aborted_start) { in o2hb_thread()
1269 struct o2hb_debug_buf *db = inode->i_private; in o2hb_debug_open()
1274 int i = -1; in o2hb_debug_open()
1278 BUG_ON(sizeof(map) < db->db_size); in o2hb_debug_open()
1284 switch (db->db_type) { in o2hb_debug_open()
1290 memcpy(map, db->db_data, db->db_size); in o2hb_debug_open()
1296 reg = (struct o2hb_region *)db->db_data; in o2hb_debug_open()
1297 memcpy(map, reg->hr_live_node_bitmap, db->db_size); in o2hb_debug_open()
1302 reg = (struct o2hb_region *)db->db_data; in o2hb_debug_open()
1303 out += scnprintf(buf + out, PAGE_SIZE - out, "%d\n", in o2hb_debug_open()
1304 reg->hr_region_num); in o2hb_debug_open()
1308 reg = (struct o2hb_region *)db->db_data; in o2hb_debug_open()
1309 lts = reg->hr_last_timeout_start; in o2hb_debug_open()
1312 lts = jiffies_to_msecs(jiffies - lts); in o2hb_debug_open()
1313 out += scnprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts); in o2hb_debug_open()
1317 reg = (struct o2hb_region *)db->db_data; in o2hb_debug_open()
1318 out += scnprintf(buf + out, PAGE_SIZE - out, "%u\n", in o2hb_debug_open()
1319 !!reg->hr_item_pinned); in o2hb_debug_open()
1326 while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len) in o2hb_debug_open()
1327 out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i); in o2hb_debug_open()
1328 out += scnprintf(buf + out, PAGE_SIZE - out, "\n"); in o2hb_debug_open()
1333 file->private_data = buf; in o2hb_debug_open()
1337 return -ENOMEM; in o2hb_debug_open()
1342 kfree(file->private_data); in o2hb_debug_release()
1349 return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, in o2hb_debug_read()
1350 i_size_read(file->f_mapping->host)); in o2hb_debug_read()
1392 (*db)->db_type = type; in o2hb_debug_create()
1393 (*db)->db_size = size; in o2hb_debug_create()
1394 (*db)->db_len = len; in o2hb_debug_create()
1395 (*db)->db_data = data; in o2hb_debug_create()
1492 mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg->hr_bdev); in o2hb_region_release()
1494 kfree(reg->hr_tmp_block); in o2hb_region_release()
1496 if (reg->hr_slot_data) { in o2hb_region_release()
1497 for (i = 0; i < reg->hr_num_pages; i++) { in o2hb_region_release()
1498 page = reg->hr_slot_data[i]; in o2hb_region_release()
1502 kfree(reg->hr_slot_data); in o2hb_region_release()
1505 if (reg->hr_bdev) in o2hb_region_release()
1506 blkdev_put(reg->hr_bdev, NULL); in o2hb_region_release()
1508 kfree(reg->hr_slots); in o2hb_region_release()
1510 debugfs_remove_recursive(reg->hr_debug_dir); in o2hb_region_release()
1511 kfree(reg->hr_db_livenodes); in o2hb_region_release()
1512 kfree(reg->hr_db_regnum); in o2hb_region_release()
1513 kfree(reg->hr_db_elapsed_time); in o2hb_region_release()
1514 kfree(reg->hr_db_pinned); in o2hb_region_release()
1517 list_del(&reg->hr_all_item); in o2hb_region_release()
1520 o2net_unregister_handler_list(&reg->hr_handler_list); in o2hb_region_release()
1534 return -EINVAL; in o2hb_read_block_input()
1538 return -ERANGE; in o2hb_read_block_input()
1540 return -EINVAL; in o2hb_read_block_input()
1545 *ret_bits = ffs(bytes) - 1; in o2hb_read_block_input()
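Line 1545 turns a block size into a bit shift with ffs(bytes) - 1: for example, 512 gives ffs() = 10 and hence 9 bits, and 4096 gives ffs() = 13 and hence 12 bits. This only makes sense for power-of-two sizes, which is presumably what the -EINVAL/-ERANGE returns just above are guarding (an interpretation, since only fragments are shown here).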
1553 return sprintf(page, "%u\n", to_o2hb_region(item)->hr_block_bytes); in o2hb_region_block_bytes_show()
1565 if (reg->hr_bdev) in o2hb_region_block_bytes_store()
1566 return -EINVAL; in o2hb_region_block_bytes_store()
1573 reg->hr_block_bytes = (unsigned int)block_bytes; in o2hb_region_block_bytes_store()
1574 reg->hr_block_bits = block_bits; in o2hb_region_block_bytes_store()
1582 return sprintf(page, "%llu\n", to_o2hb_region(item)->hr_start_block); in o2hb_region_start_block_show()
1594 if (reg->hr_bdev) in o2hb_region_start_block_store()
1595 return -EINVAL; in o2hb_region_start_block_store()
1599 return -EINVAL; in o2hb_region_start_block_store()
1601 reg->hr_start_block = tmp; in o2hb_region_start_block_store()
1608 return sprintf(page, "%d\n", to_o2hb_region(item)->hr_blocks); in o2hb_region_blocks_show()
1619 if (reg->hr_bdev) in o2hb_region_blocks_store()
1620 return -EINVAL; in o2hb_region_blocks_store()
1624 return -EINVAL; in o2hb_region_blocks_store()
1627 return -ERANGE; in o2hb_region_blocks_store()
1629 reg->hr_blocks = (unsigned int)tmp; in o2hb_region_blocks_store()
1638 if (to_o2hb_region(item)->hr_bdev) in o2hb_region_dev_show()
1639 ret = sprintf(page, "%pg\n", to_o2hb_region(item)->hr_bdev); in o2hb_region_dev_show()
1646 reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits; in o2hb_init_region_params()
1647 reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; in o2hb_init_region_params()
1650 reg->hr_start_block, reg->hr_blocks); in o2hb_init_region_params()
1652 reg->hr_block_bytes, reg->hr_block_bits); in o2hb_init_region_params()
1653 mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms); in o2hb_init_region_params()
1661 unsigned int spp = reg->hr_slots_per_page; in o2hb_map_slot_data()
1666 reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL); in o2hb_map_slot_data()
1667 if (reg->hr_tmp_block == NULL) in o2hb_map_slot_data()
1668 return -ENOMEM; in o2hb_map_slot_data()
1670 reg->hr_slots = kcalloc(reg->hr_blocks, in o2hb_map_slot_data()
1672 if (reg->hr_slots == NULL) in o2hb_map_slot_data()
1673 return -ENOMEM; in o2hb_map_slot_data()
1675 for(i = 0; i < reg->hr_blocks; i++) { in o2hb_map_slot_data()
1676 slot = &reg->hr_slots[i]; in o2hb_map_slot_data()
1677 slot->ds_node_num = i; in o2hb_map_slot_data()
1678 INIT_LIST_HEAD(&slot->ds_live_item); in o2hb_map_slot_data()
1679 slot->ds_raw_block = NULL; in o2hb_map_slot_data()
1682 reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp; in o2hb_map_slot_data()
1685 reg->hr_num_pages, reg->hr_blocks, spp); in o2hb_map_slot_data()
1687 reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *), in o2hb_map_slot_data()
1689 if (!reg->hr_slot_data) in o2hb_map_slot_data()
1690 return -ENOMEM; in o2hb_map_slot_data()
1692 for(i = 0; i < reg->hr_num_pages; i++) { in o2hb_map_slot_data()
1695 return -ENOMEM; in o2hb_map_slot_data()
1697 reg->hr_slot_data[i] = page; in o2hb_map_slot_data()
1702 (j < spp) && ((j + last_slot) < reg->hr_blocks); in o2hb_map_slot_data()
1704 BUG_ON((j + last_slot) >= reg->hr_blocks); in o2hb_map_slot_data()
1706 slot = &reg->hr_slots[j + last_slot]; in o2hb_map_slot_data()
1707 slot->ds_raw_block = in o2hb_map_slot_data()
1710 raw += reg->hr_block_bytes; in o2hb_map_slot_data()
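The slot-layout arithmetic visible in the fragments above: hr_slots_per_page = PAGE_SIZE >> hr_block_bits (line 1646), and hr_num_pages = (hr_blocks + spp - 1) / spp (line 1682), a round-up division. With 4096-byte pages and 512-byte heartbeat blocks, spp = 8, so a 255-slot region needs (255 + 7) / 8 = 32 pages; the numbers here are illustrative only.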
1726 ret = o2hb_read_slots(reg, 0, reg->hr_blocks); in o2hb_populate_slot_data()
1731 * slot, so we do no verification - o2hb_check_slot will in o2hb_populate_slot_data()
1734 for(i = 0; i < reg->hr_blocks; i++) { in o2hb_populate_slot_data()
1735 slot = &reg->hr_slots[i]; in o2hb_populate_slot_data()
1736 hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block; in o2hb_populate_slot_data()
1740 slot->ds_last_time = le64_to_cpu(hb_block->hb_seq); in o2hb_populate_slot_data()
1741 slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation); in o2hb_populate_slot_data()
1759 ssize_t ret = -EINVAL; in o2hb_region_dev_store()
1762 if (reg->hr_bdev) in o2hb_region_dev_store()
1781 if (reg->hr_blocks == 0 || reg->hr_start_block == 0 || in o2hb_region_dev_store()
1782 reg->hr_block_bytes == 0) in o2hb_region_dev_store()
1785 if (!S_ISBLK(f.file->f_mapping->host->i_mode)) in o2hb_region_dev_store()
1788 reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev, in o2hb_region_dev_store()
1791 if (IS_ERR(reg->hr_bdev)) { in o2hb_region_dev_store()
1792 ret = PTR_ERR(reg->hr_bdev); in o2hb_region_dev_store()
1793 reg->hr_bdev = NULL; in o2hb_region_dev_store()
1797 sectsize = bdev_logical_block_size(reg->hr_bdev); in o2hb_region_dev_store()
1798 if (sectsize != reg->hr_block_bytes) { in o2hb_region_dev_store()
1801 reg->hr_block_bytes, sectsize); in o2hb_region_dev_store()
1802 ret = -EINVAL; in o2hb_region_dev_store()
1810 get_random_bytes(&reg->hr_generation, in o2hb_region_dev_store()
1811 sizeof(reg->hr_generation)); in o2hb_region_dev_store()
1812 } while (reg->hr_generation == 0); in o2hb_region_dev_store()
1826 INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout); in o2hb_region_dev_store()
1827 INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout); in o2hb_region_dev_store()
1845 atomic_set(&reg->hr_steady_iterations, live_threshold); in o2hb_region_dev_store()
1847 atomic_set(&reg->hr_unsteady_iterations, (live_threshold * 3)); in o2hb_region_dev_store()
1849 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", in o2hb_region_dev_store()
1850 reg->hr_item.ci_name); in o2hb_region_dev_store()
1858 reg->hr_task = hb_task; in o2hb_region_dev_store()
1862 atomic_read(&reg->hr_steady_iterations) == 0 || in o2hb_region_dev_store()
1863 reg->hr_node_deleted); in o2hb_region_dev_store()
1865 atomic_set(&reg->hr_steady_iterations, 0); in o2hb_region_dev_store()
1866 reg->hr_aborted_start = 1; in o2hb_region_dev_store()
1869 if (reg->hr_aborted_start) { in o2hb_region_dev_store()
1870 ret = -EIO; in o2hb_region_dev_store()
1874 if (reg->hr_node_deleted) { in o2hb_region_dev_store()
1875 ret = -EINVAL; in o2hb_region_dev_store()
1881 hb_task = reg->hr_task; in o2hb_region_dev_store()
1883 set_bit(reg->hr_region_num, o2hb_live_region_bitmap); in o2hb_region_dev_store()
1889 ret = -EIO; in o2hb_region_dev_store()
1893 config_item_name(&reg->hr_item), reg->hr_bdev); in o2hb_region_dev_store()
1897 blkdev_put(reg->hr_bdev, NULL); in o2hb_region_dev_store()
1898 reg->hr_bdev = NULL; in o2hb_region_dev_store()
1912 if (reg->hr_task) in o2hb_region_pid_show()
1913 pid = task_pid_nr(reg->hr_task); in o2hb_region_pid_show()
1966 dir = debugfs_create_dir(config_item_name(&reg->hr_item), parent); in o2hb_debug_region_init()
1967 reg->hr_debug_dir = dir; in o2hb_debug_region_init()
1969 o2hb_debug_create(O2HB_DEBUG_LIVENODES, dir, &(reg->hr_db_livenodes), in o2hb_debug_region_init()
1970 sizeof(*(reg->hr_db_livenodes)), in o2hb_debug_region_init()
1972 sizeof(reg->hr_live_node_bitmap), O2NM_MAX_NODES, in o2hb_debug_region_init()
1975 o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, dir, &(reg->hr_db_regnum), in o2hb_debug_region_init()
1976 sizeof(*(reg->hr_db_regnum)), in o2hb_debug_region_init()
1980 &(reg->hr_db_elapsed_time), in o2hb_debug_region_init()
1981 sizeof(*(reg->hr_db_elapsed_time)), in o2hb_debug_region_init()
1984 o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, dir, &(reg->hr_db_pinned), in o2hb_debug_region_init()
1985 sizeof(*(reg->hr_db_pinned)), in o2hb_debug_region_init()
1998 return ERR_PTR(-ENOMEM); in o2hb_heartbeat_group_make_item()
2001 ret = -ENAMETOOLONG; in o2hb_heartbeat_group_make_item()
2006 reg->hr_region_num = 0; in o2hb_heartbeat_group_make_item()
2008 reg->hr_region_num = find_first_zero_bit(o2hb_region_bitmap, in o2hb_heartbeat_group_make_item()
2010 if (reg->hr_region_num >= O2NM_MAX_REGIONS) { in o2hb_heartbeat_group_make_item()
2012 ret = -EFBIG; in o2hb_heartbeat_group_make_item()
2015 set_bit(reg->hr_region_num, o2hb_region_bitmap); in o2hb_heartbeat_group_make_item()
2017 list_add_tail(&reg->hr_all_item, &o2hb_all_regions); in o2hb_heartbeat_group_make_item()
2020 config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type); in o2hb_heartbeat_group_make_item()
2026 reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS, in o2hb_heartbeat_group_make_item()
2028 INIT_LIST_HEAD(&reg->hr_handler_list); in o2hb_heartbeat_group_make_item()
2029 ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key, in o2hb_heartbeat_group_make_item()
2032 reg, NULL, &reg->hr_handler_list); in o2hb_heartbeat_group_make_item()
2036 ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key, in o2hb_heartbeat_group_make_item()
2039 reg, NULL, &reg->hr_handler_list); in o2hb_heartbeat_group_make_item()
2045 return &reg->hr_item; in o2hb_heartbeat_group_make_item()
2048 o2net_unregister_handler_list(&reg->hr_handler_list); in o2hb_heartbeat_group_make_item()
2051 list_del(&reg->hr_all_item); in o2hb_heartbeat_group_make_item()
2053 clear_bit(reg->hr_region_num, o2hb_region_bitmap); in o2hb_heartbeat_group_make_item()
2069 hb_task = reg->hr_task; in o2hb_heartbeat_group_drop_item()
2070 reg->hr_task = NULL; in o2hb_heartbeat_group_drop_item()
2071 reg->hr_item_dropped = 1; in o2hb_heartbeat_group_drop_item()
2079 clear_bit(reg->hr_region_num, o2hb_region_bitmap); in o2hb_heartbeat_group_drop_item()
2080 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); in o2hb_heartbeat_group_drop_item()
2081 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) in o2hb_heartbeat_group_drop_item()
2083 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); in o2hb_heartbeat_group_drop_item()
2086 ((atomic_read(&reg->hr_steady_iterations) == 0) ? in o2hb_heartbeat_group_drop_item()
2088 reg->hr_bdev); in o2hb_heartbeat_group_drop_item()
2093 * check reg->hr_task in o2hb_heartbeat_group_drop_item()
2095 if (atomic_read(&reg->hr_steady_iterations) != 0) { in o2hb_heartbeat_group_drop_item()
2096 reg->hr_aborted_start = 1; in o2hb_heartbeat_group_drop_item()
2097 atomic_set(&reg->hr_steady_iterations, 0); in o2hb_heartbeat_group_drop_item()
2137 return -EINVAL; in o2hb_heartbeat_group_dead_threshold_store()
2159 len = (page[count - 1] == '\n') ? count - 1 : count; in o2hb_heartbeat_group_mode_store()
2161 return -EINVAL; in o2hb_heartbeat_group_mode_store()
2174 return -EINVAL; in o2hb_heartbeat_group_mode_store()
2209 config_group_init_type_name(&hs->hs_group, "heartbeat", in o2hb_alloc_hb_set()
2212 ret = &hs->hs_group; in o2hb_alloc_hb_set()
2230 return ERR_PTR(-EINVAL); in hbcall_from_type()
2241 INIT_LIST_HEAD(&hc->hc_item); in o2hb_setup_callback()
2242 hc->hc_func = func; in o2hb_setup_callback()
2243 hc->hc_data = data; in o2hb_setup_callback()
2244 hc->hc_priority = priority; in o2hb_setup_callback()
2245 hc->hc_type = type; in o2hb_setup_callback()
2246 hc->hc_magic = O2HB_CB_MAGIC; in o2hb_setup_callback()
2266 if (reg->hr_item_dropped) in o2hb_region_pin()
2269 uuid = config_item_name(&reg->hr_item); in o2hb_region_pin()
2278 if (reg->hr_item_pinned || reg->hr_item_dropped) in o2hb_region_pin()
2282 ret = o2nm_depend_item(&reg->hr_item); in o2hb_region_pin()
2285 reg->hr_item_pinned = 1; in o2hb_region_pin()
2287 if (ret == -ENOENT && found) in o2hb_region_pin()
2319 if (reg->hr_item_dropped) in o2hb_region_unpin()
2322 uuid = config_item_name(&reg->hr_item); in o2hb_region_unpin()
2329 if (reg->hr_item_pinned) { in o2hb_region_unpin()
2331 o2nm_undepend_item(&reg->hr_item); in o2hb_region_unpin()
2332 reg->hr_item_pinned = 0; in o2hb_region_unpin()
2382 o2hb_dependent_users--; in o2hb_region_dec_user()
2397 BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); in o2hb_register_callback()
2398 BUG_ON(!list_empty(&hc->hc_item)); in o2hb_register_callback()
2400 hbcall = hbcall_from_type(hc->hc_type); in o2hb_register_callback()
2416 list_for_each_entry(f, &hbcall->list, hc_item) { in o2hb_register_callback()
2417 if (hc->hc_priority < f->hc_priority) { in o2hb_register_callback()
2418 list_add_tail(&hc->hc_item, &f->hc_item); in o2hb_register_callback()
2422 if (list_empty(&hc->hc_item)) in o2hb_register_callback()
2423 list_add_tail(&hc->hc_item, &hbcall->list); in o2hb_register_callback()
2437 BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); in o2hb_unregister_callback()
2443 if (list_empty(&hc->hc_item)) in o2hb_unregister_callback()
2451 list_del_init(&hc->hc_item); in o2hb_unregister_callback()
2504 reg->hr_unclean_stop = 1; in o2hb_stop_all_regions()
2520 if (reg->hr_item_dropped) in o2hb_get_all_regions()
2523 mlog(0, "Region: %s\n", config_item_name(&reg->hr_item)); in o2hb_get_all_regions()
2525 memcpy(p, config_item_name(&reg->hr_item), in o2hb_get_all_regions()