Lines Matching +full:oc-delay-us

1 // SPDX-License-Identifier: GPL-2.0-only
5 ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
52 L: send_xxxx() -> R: receive_xxxx()
54 L: receive_xxxx_reply() <- R: send_xxxx_reply()
95 * Lock compatibility matrix - thanks Steve
120 * -1 = nothing happens to the LVB
125 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
126 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
127 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
128 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
129 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
130 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
131 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
132 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
136 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
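The table above is the LVB-operation matrix, and like the compat macro it is indexed with grmode+1 / rqmode+1 because DLM_LOCK_IV is -1. A standalone userspace model, with values copied from the listing; the meaning of 1/0/-1 follows set_lvb_lock() further down (1 = copy the rsb's LVB into the lkb, 0 = copy the lkb's LVB into the rsb, -1 = leave the LVB alone):

#include <stdio.h>

enum { UN, NL, CR, CW, PR, PW, EX, PD };   /* mode index, +1 offset applied */

static const int lvb_ops[8][8] = {
        { -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        { -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        { -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        { -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        { -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        { -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        { -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        { -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};

int main(void)
{
        /* Converting PR -> EX: op 1, the caller reads the resource LVB. */
        printf("PR->EX lvb op: %d\n", lvb_ops[PR][EX]);
        /* Converting EX -> NL: op 0, the caller writes its LVB back. */
        printf("EX->NL lvb op: %d\n", lvb_ops[EX][NL]);
        return 0;
}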
165 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, in dlm_print_lkb()
166 dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_rqmode, in dlm_print_lkb()
167 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid, in dlm_print_lkb()
168 (unsigned long long)lkb->lkb_recover_seq); in dlm_print_lkb()
175 r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid, in dlm_print_rsb()
176 r->res_flags, r->res_first_lkid, r->res_recover_locks_count, in dlm_print_rsb()
177 r->res_name); in dlm_print_rsb()
187 list_empty(&r->res_root_list), list_empty(&r->res_recover_list)); in dlm_dump_rsb()
189 list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) in dlm_dump_rsb()
192 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) in dlm_dump_rsb()
195 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) in dlm_dump_rsb()
198 list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) in dlm_dump_rsb()
206 down_read(&ls->ls_in_recovery); in dlm_lock_recovery()
211 up_read(&ls->ls_in_recovery); in dlm_unlock_recovery()
216 return down_read_trylock(&ls->ls_in_recovery); in dlm_lock_recovery_try()
221 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE); in can_be_queued()
226 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST); in force_blocking_asts()
231 return test_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); in is_demoted()
236 return test_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); in is_altmode()
241 return (lkb->lkb_status == DLM_LKSTS_GRANTED); in is_granted()
246 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r);); in is_remote()
247 return !!r->res_nodeid; in is_remote()
252 return lkb->lkb_nodeid && in is_process_copy()
253 !test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in is_process_copy()
258 return test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in is_master_copy()
263 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) || in middle_conversion()
264 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW)) in middle_conversion()
271 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode); in down_conversion()
276 return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in is_overlap_unlock()
281 return test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in is_overlap_cancel()
286 return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags) || in is_overlap()
287 test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in is_overlap()
295 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb);); in queue_cast()
297 if (rv == -DLM_ECANCEL && in queue_cast()
298 test_and_clear_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags)) in queue_cast()
299 rv = -EDEADLK; in queue_cast()
301 dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, dlm_sbflags_val(lkb)); in queue_cast()
307 is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL); in queue_cast_overlap()
328 kref_get(&r->res_ref); in hold_rsb()
341 struct dlm_ls *ls = r->res_ls; in put_rsb()
342 uint32_t bucket = r->res_bucket; in put_rsb()
345 rv = kref_put_lock(&r->res_ref, toss_rsb, in put_rsb()
346 &ls->ls_rsbtbl[bucket].lock); in put_rsb()
348 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in put_rsb()
361 spin_lock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
362 if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { in pre_rsb_struct()
363 spin_unlock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
366 spin_unlock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
371 spin_lock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
373 list_add(&r1->res_hashchain, &ls->ls_new_rsb); in pre_rsb_struct()
374 ls->ls_new_rsb_count++; in pre_rsb_struct()
377 list_add(&r2->res_hashchain, &ls->ls_new_rsb); in pre_rsb_struct()
378 ls->ls_new_rsb_count++; in pre_rsb_struct()
380 count = ls->ls_new_rsb_count; in pre_rsb_struct()
381 spin_unlock(&ls->ls_new_rsb_spin); in pre_rsb_struct()
384 return -ENOMEM; in pre_rsb_struct()
388 /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
398 spin_lock(&ls->ls_new_rsb_spin); in get_rsb_struct()
399 if (list_empty(&ls->ls_new_rsb)) { in get_rsb_struct()
400 count = ls->ls_new_rsb_count; in get_rsb_struct()
401 spin_unlock(&ls->ls_new_rsb_spin); in get_rsb_struct()
405 return -EAGAIN; in get_rsb_struct()
408 r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); in get_rsb_struct()
409 list_del(&r->res_hashchain); in get_rsb_struct()
411 memset(&r->res_hashnode, 0, sizeof(struct rb_node)); in get_rsb_struct()
412 ls->ls_new_rsb_count--; in get_rsb_struct()
413 spin_unlock(&ls->ls_new_rsb_spin); in get_rsb_struct()
415 r->res_ls = ls; in get_rsb_struct()
416 r->res_length = len; in get_rsb_struct()
417 memcpy(r->res_name, name, len); in get_rsb_struct()
418 mutex_init(&r->res_mutex); in get_rsb_struct()
420 INIT_LIST_HEAD(&r->res_lookup); in get_rsb_struct()
421 INIT_LIST_HEAD(&r->res_grantqueue); in get_rsb_struct()
422 INIT_LIST_HEAD(&r->res_convertqueue); in get_rsb_struct()
423 INIT_LIST_HEAD(&r->res_waitqueue); in get_rsb_struct()
424 INIT_LIST_HEAD(&r->res_root_list); in get_rsb_struct()
425 INIT_LIST_HEAD(&r->res_recover_list); in get_rsb_struct()
437 return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN); in rsb_cmp()
443 struct rb_node *node = tree->rb_node; in dlm_search_rsb_tree()
451 node = node->rb_left; in dlm_search_rsb_tree()
453 node = node->rb_right; in dlm_search_rsb_tree()
458 return -EBADR; in dlm_search_rsb_tree()
467 struct rb_node **newn = &tree->rb_node; in rsb_insert()
476 rc = rsb_cmp(cur, rsb->res_name, rsb->res_length); in rsb_insert()
478 newn = &parent->rb_left; in rsb_insert()
480 newn = &parent->rb_right; in rsb_insert()
485 return -EEXIST; in rsb_insert()
489 rb_link_node(&rsb->res_hashnode, parent, newn); in rsb_insert()
490 rb_insert_color(&rsb->res_hashnode, tree); in rsb_insert()
501 * to excessive master lookups and removals if we don't delay the release.
511 * - previously used locally but not any more (were on keep list, then
513 * - created and put on toss list as a directory record for a lookup
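A kernel-style sketch of the per-bucket structure implied by the ls_rsbtbl[b].keep / .toss / .lock / .flags accesses throughout this file; this is an approximation of struct dlm_rsbtable from dlm_internal.h, not the literal definition:

#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct dlm_rsbtable_sketch {
        struct rb_root keep;    /* rsbs currently in use */
        struct rb_root toss;    /* unused rsbs, freed only after
                                 * res_toss_time ages past the remove
                                 * interval, avoiding the churn of
                                 * repeated master lookups */
        spinlock_t lock;        /* guards both trees */
        unsigned long flags;    /* DLM_RTF_SHRINK_BIT: toss tree may
                                 * hold expired entries */
};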
562 * from_nodeid has sent us a lock in dlm_recover_locks, believing in find_rsb_dir()
568 * If someone sends us a request, we are the dir node, and we do in find_rsb_dir()
570 * someone sends us a request after we have removed/freed an rsb in find_rsb_dir()
587 spin_lock(&ls->ls_rsbtbl[b].lock); in find_rsb_dir()
589 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in find_rsb_dir()
597 kref_get(&r->res_ref); in find_rsb_dir()
602 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in find_rsb_dir()
613 if ((r->res_master_nodeid != our_nodeid) && from_other) { in find_rsb_dir()
615 has sent us a request */ in find_rsb_dir()
617 from_nodeid, r->res_master_nodeid, dir_nodeid, in find_rsb_dir()
618 r->res_name); in find_rsb_dir()
619 error = -ENOTBLK; in find_rsb_dir()
623 if ((r->res_master_nodeid != our_nodeid) && from_dir) { in find_rsb_dir()
626 from_nodeid, r->res_master_nodeid); in find_rsb_dir()
629 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
630 r->res_nodeid = 0; in find_rsb_dir()
632 r->res_first_lkid = 0; in find_rsb_dir()
635 if (from_local && (r->res_master_nodeid != our_nodeid)) { in find_rsb_dir()
639 r->res_first_lkid = 0; in find_rsb_dir()
642 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in find_rsb_dir()
643 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_dir()
652 if (error == -EBADR && !create) in find_rsb_dir()
656 if (error == -EAGAIN) { in find_rsb_dir()
657 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_dir()
663 r->res_hash = hash; in find_rsb_dir()
664 r->res_bucket = b; in find_rsb_dir()
665 r->res_dir_nodeid = dir_nodeid; in find_rsb_dir()
666 kref_init(&r->res_ref); in find_rsb_dir()
671 from_nodeid, r->res_name); in find_rsb_dir()
672 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
673 r->res_nodeid = 0; in find_rsb_dir()
680 from_nodeid, dir_nodeid, our_nodeid, r->res_name); in find_rsb_dir()
683 error = -ENOTBLK; in find_rsb_dir()
689 from_nodeid, dir_nodeid, r->res_name); in find_rsb_dir()
695 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
696 r->res_nodeid = 0; in find_rsb_dir()
699 r->res_master_nodeid = 0; in find_rsb_dir()
700 r->res_nodeid = -1; in find_rsb_dir()
704 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_dir()
706 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_dir()
712 /* During recovery, other nodes can send us new MSTCPY locks (from
731 spin_lock(&ls->ls_rsbtbl[b].lock); in find_rsb_nodir()
733 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in find_rsb_nodir()
741 kref_get(&r->res_ref); in find_rsb_nodir()
746 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in find_rsb_nodir()
756 if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) { in find_rsb_nodir()
757 /* our rsb is not master, and another node has sent us a in find_rsb_nodir()
760 from_nodeid, r->res_master_nodeid, dir_nodeid); in find_rsb_nodir()
762 error = -ENOTBLK; in find_rsb_nodir()
766 if (!recover && (r->res_master_nodeid != our_nodeid) && in find_rsb_nodir()
771 our_nodeid, r->res_master_nodeid, dir_nodeid); in find_rsb_nodir()
773 r->res_master_nodeid = our_nodeid; in find_rsb_nodir()
774 r->res_nodeid = 0; in find_rsb_nodir()
777 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in find_rsb_nodir()
778 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_nodir()
788 if (error == -EAGAIN) { in find_rsb_nodir()
789 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_nodir()
795 r->res_hash = hash; in find_rsb_nodir()
796 r->res_bucket = b; in find_rsb_nodir()
797 r->res_dir_nodeid = dir_nodeid; in find_rsb_nodir()
798 r->res_master_nodeid = dir_nodeid; in find_rsb_nodir()
799 r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid; in find_rsb_nodir()
800 kref_init(&r->res_ref); in find_rsb_nodir()
802 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); in find_rsb_nodir()
804 spin_unlock(&ls->ls_rsbtbl[b].lock); in find_rsb_nodir()
818 return -EINVAL; in find_rsb()
821 b = hash & (ls->ls_rsbtbl_size - 1); in find_rsb()
841 from_nodeid, r->res_master_nodeid, in validate_master_nodeid()
842 r->res_dir_nodeid); in validate_master_nodeid()
844 return -ENOTBLK; in validate_master_nodeid()
847 if (from_nodeid != r->res_dir_nodeid) { in validate_master_nodeid()
849 has sent us a request. this is much more common when our in validate_master_nodeid()
850 master_nodeid is zero, so limit debug to non-zero. */ in validate_master_nodeid()
852 if (r->res_master_nodeid) { in validate_master_nodeid()
855 r->res_master_nodeid, r->res_dir_nodeid, in validate_master_nodeid()
856 r->res_first_lkid, r->res_name); in validate_master_nodeid()
858 return -ENOTBLK; in validate_master_nodeid()
860 /* our rsb is not master, but the dir nodeid has sent us a in validate_master_nodeid()
861 request; this could happen with master 0 / res_nodeid -1 */ in validate_master_nodeid()
863 if (r->res_master_nodeid) { in validate_master_nodeid()
866 from_nodeid, r->res_master_nodeid, in validate_master_nodeid()
867 r->res_first_lkid, r->res_name); in validate_master_nodeid()
870 r->res_master_nodeid = dlm_our_nodeid(); in validate_master_nodeid()
871 r->res_nodeid = 0; in validate_master_nodeid()
883 if (r->res_dir_nodeid != our_nodeid) { in __dlm_master_lookup()
886 r->res_dir_nodeid, our_nodeid, r->res_name); in __dlm_master_lookup()
887 r->res_dir_nodeid = our_nodeid; in __dlm_master_lookup()
890 if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) { in __dlm_master_lookup()
897 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
898 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
908 if (from_master && (r->res_master_nodeid != from_nodeid)) { in __dlm_master_lookup()
915 __func__, from_nodeid, r->res_master_nodeid, in __dlm_master_lookup()
916 r->res_nodeid, r->res_first_lkid, r->res_name); in __dlm_master_lookup()
918 if (r->res_master_nodeid == our_nodeid) { in __dlm_master_lookup()
924 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
925 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
929 if (!r->res_master_nodeid) { in __dlm_master_lookup()
935 from_nodeid, r->res_first_lkid, r->res_name); in __dlm_master_lookup()
936 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
937 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
941 (r->res_master_nodeid == from_nodeid)) { in __dlm_master_lookup()
948 __func__, from_nodeid, flags, r->res_first_lkid, in __dlm_master_lookup()
949 r->res_name); in __dlm_master_lookup()
953 *r_nodeid = r->res_master_nodeid; in __dlm_master_lookup()
996 return -EINVAL; in dlm_master_lookup()
1001 return -EINVAL; in dlm_master_lookup()
1005 b = hash & (ls->ls_rsbtbl_size - 1); in dlm_master_lookup()
1011 ls->ls_num_nodes); in dlm_master_lookup()
1012 *r_nodeid = -1; in dlm_master_lookup()
1013 return -EINVAL; in dlm_master_lookup()
1021 spin_lock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1022 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in dlm_master_lookup()
1029 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1042 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in dlm_master_lookup()
1053 r->res_toss_time = jiffies; in dlm_master_lookup()
1055 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1061 if (error == -EAGAIN) { in dlm_master_lookup()
1062 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1068 r->res_hash = hash; in dlm_master_lookup()
1069 r->res_bucket = b; in dlm_master_lookup()
1070 r->res_dir_nodeid = our_nodeid; in dlm_master_lookup()
1071 r->res_master_nodeid = from_nodeid; in dlm_master_lookup()
1072 r->res_nodeid = from_nodeid; in dlm_master_lookup()
1073 kref_init(&r->res_ref); in dlm_master_lookup()
1074 r->res_toss_time = jiffies; in dlm_master_lookup()
1076 error = rsb_insert(r, &ls->ls_rsbtbl[b].toss); in dlm_master_lookup()
1080 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1088 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_master_lookup()
1098 for (i = 0; i < ls->ls_rsbtbl_size; i++) { in dlm_dump_rsb_hash()
1099 spin_lock(&ls->ls_rsbtbl[i].lock); in dlm_dump_rsb_hash()
1100 for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { in dlm_dump_rsb_hash()
1102 if (r->res_hash == hash) in dlm_dump_rsb_hash()
1105 spin_unlock(&ls->ls_rsbtbl[i].lock); in dlm_dump_rsb_hash()
1116 b = hash & (ls->ls_rsbtbl_size - 1); in dlm_dump_rsb_name()
1118 spin_lock(&ls->ls_rsbtbl[b].lock); in dlm_dump_rsb_name()
1119 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in dlm_dump_rsb_name()
1123 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in dlm_dump_rsb_name()
1129 spin_unlock(&ls->ls_rsbtbl[b].lock); in dlm_dump_rsb_name()
1135 struct dlm_ls *ls = r->res_ls; in toss_rsb()
1137 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); in toss_rsb()
1138 kref_init(&r->res_ref); in toss_rsb()
1139 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep); in toss_rsb()
1140 rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss); in toss_rsb()
1141 r->res_toss_time = jiffies; in toss_rsb()
1142 set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags); in toss_rsb()
1143 if (r->res_lvbptr) { in toss_rsb()
1144 dlm_free_lvb(r->res_lvbptr); in toss_rsb()
1145 r->res_lvbptr = NULL; in toss_rsb()
1154 rv = kref_put(&r->res_ref, toss_rsb); in unhold_rsb()
1165 DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); in kill_rsb()
1166 DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); in kill_rsb()
1167 DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r);); in kill_rsb()
1168 DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); in kill_rsb()
1169 DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); in kill_rsb()
1170 DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); in kill_rsb()
1179 lkb->lkb_resource = r; in attach_lkb()
1184 if (lkb->lkb_resource) { in detach_lkb()
1185 put_rsb(lkb->lkb_resource); in detach_lkb()
1186 lkb->lkb_resource = NULL; in detach_lkb()
1198 return -ENOMEM; in _create_lkb()
1200 lkb->lkb_last_bast_mode = -1; in _create_lkb()
1201 lkb->lkb_nodeid = -1; in _create_lkb()
1202 lkb->lkb_grmode = DLM_LOCK_IV; in _create_lkb()
1203 kref_init(&lkb->lkb_ref); in _create_lkb()
1204 INIT_LIST_HEAD(&lkb->lkb_ownqueue); in _create_lkb()
1205 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); in _create_lkb()
1206 INIT_LIST_HEAD(&lkb->lkb_cb_list); in _create_lkb()
1207 INIT_LIST_HEAD(&lkb->lkb_callbacks); in _create_lkb()
1208 spin_lock_init(&lkb->lkb_cb_lock); in _create_lkb()
1209 INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); in _create_lkb()
1212 spin_lock(&ls->ls_lkbidr_spin); in _create_lkb()
1213 rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); in _create_lkb()
1215 lkb->lkb_id = rv; in _create_lkb()
1216 spin_unlock(&ls->ls_lkbidr_spin); in _create_lkb()
1238 spin_lock(&ls->ls_lkbidr_spin); in find_lkb()
1239 lkb = idr_find(&ls->ls_lkbidr, lkid); in find_lkb()
1241 kref_get(&lkb->lkb_ref); in find_lkb()
1242 spin_unlock(&ls->ls_lkbidr_spin); in find_lkb()
1245 return lkb ? 0 : -ENOENT; in find_lkb()
1255 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); in kill_lkb()
1263 uint32_t lkid = lkb->lkb_id; in __put_lkb()
1266 rv = kref_put_lock(&lkb->lkb_ref, kill_lkb, in __put_lkb()
1267 &ls->ls_lkbidr_spin); in __put_lkb()
1269 idr_remove(&ls->ls_lkbidr, lkid); in __put_lkb()
1270 spin_unlock(&ls->ls_lkbidr_spin); in __put_lkb()
1275 if (lkb->lkb_lvbptr && is_master_copy(lkb)) in __put_lkb()
1276 dlm_free_lvb(lkb->lkb_lvbptr); in __put_lkb()
1287 DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb);); in dlm_put_lkb()
1288 DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb);); in dlm_put_lkb()
1290 ls = lkb->lkb_resource->res_ls; in dlm_put_lkb()
1299 kref_get(&lkb->lkb_ref); in hold_lkb()
1316 kref_put(&lkb->lkb_ref, unhold_lkb_assert); in unhold_lkb()
1325 if (iter->lkb_rqmode < mode) { in lkb_add_ordered()
1327 list_add_tail(new, &iter->lkb_statequeue); in lkb_add_ordered()
1339 kref_get(&lkb->lkb_ref); in add_lkb()
1341 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); in add_lkb()
1343 lkb->lkb_timestamp = ktime_get(); in add_lkb()
1345 lkb->lkb_status = status; in add_lkb()
1349 if (lkb->lkb_exflags & DLM_LKF_HEADQUE) in add_lkb()
1350 list_add(&lkb->lkb_statequeue, &r->res_waitqueue); in add_lkb()
1352 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue); in add_lkb()
1356 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue, in add_lkb()
1357 lkb->lkb_grmode); in add_lkb()
1360 if (lkb->lkb_exflags & DLM_LKF_HEADQUE) in add_lkb()
1361 list_add(&lkb->lkb_statequeue, &r->res_convertqueue); in add_lkb()
1363 list_add_tail(&lkb->lkb_statequeue, in add_lkb()
1364 &r->res_convertqueue); in add_lkb()
1373 lkb->lkb_status = 0; in del_lkb()
1374 list_del(&lkb->lkb_statequeue); in del_lkb()
1400 return -1; in msg_reply_type()
1408 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in add_to_waiters()
1412 mutex_lock(&ls->ls_waiters_mutex); in add_to_waiters()
1416 error = -EINVAL; in add_to_waiters()
1420 if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) { in add_to_waiters()
1423 set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in add_to_waiters()
1426 set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in add_to_waiters()
1429 error = -EBUSY; in add_to_waiters()
1432 wc = atomic_inc_return(&lkb->lkb_wait_count); in add_to_waiters()
1436 lkb->lkb_id, lkb->lkb_wait_type, mstype, wc, in add_to_waiters()
1441 wc = atomic_fetch_inc(&lkb->lkb_wait_count); in add_to_waiters()
1443 lkb->lkb_wait_type = mstype; in add_to_waiters()
1444 lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */ in add_to_waiters()
1446 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); in add_to_waiters()
1450 lkb->lkb_id, error, dlm_iflags_val(lkb), mstype, in add_to_waiters()
1451 lkb->lkb_wait_type, lkb->lkb_resource->res_name); in add_to_waiters()
1452 mutex_unlock(&ls->ls_waiters_mutex); in add_to_waiters()
1464 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in _remove_from_waiters()
1468 test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { in _remove_from_waiters()
1469 log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); in _remove_from_waiters()
1475 test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { in _remove_from_waiters()
1476 log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); in _remove_from_waiters()
1485 (lkb->lkb_wait_type != DLM_MSG_CANCEL)) { in _remove_from_waiters()
1487 lkb->lkb_id, lkb->lkb_wait_type); in _remove_from_waiters()
1488 return -1; in _remove_from_waiters()
1497 lingering state of the cancel and fail with -EBUSY. */ in _remove_from_waiters()
1500 (lkb->lkb_wait_type == DLM_MSG_CONVERT) && ms && !ms->m_result && in _remove_from_waiters()
1501 test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { in _remove_from_waiters()
1503 lkb->lkb_id); in _remove_from_waiters()
1504 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1505 atomic_dec(&lkb->lkb_wait_count); in _remove_from_waiters()
1511 msg due to lookup->request optimization, verify others? */ in _remove_from_waiters()
1513 if (lkb->lkb_wait_type) { in _remove_from_waiters()
1514 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1519 lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0, in _remove_from_waiters()
1520 lkb->lkb_remid, mstype, dlm_iflags_val(lkb)); in _remove_from_waiters()
1521 return -1; in _remove_from_waiters()
1524 /* the force-unlock/cancel has completed and we haven't recvd a reply in _remove_from_waiters()
1529 if (overlap_done && lkb->lkb_wait_type) { in _remove_from_waiters()
1531 lkb->lkb_id, mstype, lkb->lkb_wait_type); in _remove_from_waiters()
1532 atomic_dec(&lkb->lkb_wait_count); in _remove_from_waiters()
1534 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1537 DLM_ASSERT(atomic_read(&lkb->lkb_wait_count), dlm_print_lkb(lkb);); in _remove_from_waiters()
1539 clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in _remove_from_waiters()
1540 if (atomic_dec_and_test(&lkb->lkb_wait_count)) in _remove_from_waiters()
1541 list_del_init(&lkb->lkb_wait_reply); in _remove_from_waiters()
1548 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in remove_from_waiters()
1551 mutex_lock(&ls->ls_waiters_mutex); in remove_from_waiters()
1553 mutex_unlock(&ls->ls_waiters_mutex); in remove_from_waiters()
1563 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in remove_from_waiters_ms()
1567 mutex_lock(&ls->ls_waiters_mutex); in remove_from_waiters_ms()
1568 error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); in remove_from_waiters_ms()
1570 mutex_unlock(&ls->ls_waiters_mutex); in remove_from_waiters_ms()
1584 memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); in shrink_bucket()
1586 spin_lock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1588 if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) { in shrink_bucket()
1589 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1593 for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { in shrink_bucket()
1599 for the master node to send us a dir remove for in shrink_bucket()
1603 (r->res_master_nodeid != our_nodeid) && in shrink_bucket()
1610 if (!time_after_eq(jiffies, r->res_toss_time + in shrink_bucket()
1616 (r->res_master_nodeid == our_nodeid) && in shrink_bucket()
1623 ls->ls_remove_lens[remote_count] = r->res_length; in shrink_bucket()
1624 memcpy(ls->ls_remove_names[remote_count], r->res_name, in shrink_bucket()
1633 if (!kref_put(&r->res_ref, kill_rsb)) { in shrink_bucket()
1634 log_error(ls, "tossed rsb in use %s", r->res_name); in shrink_bucket()
1638 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in shrink_bucket()
1643 set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); in shrink_bucket()
1645 clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); in shrink_bucket()
1646 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1653 * important to keep us (the master node) from being out of sync in shrink_bucket()
1658 name = ls->ls_remove_names[i]; in shrink_bucket()
1659 len = ls->ls_remove_lens[i]; in shrink_bucket()
1661 spin_lock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1662 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in shrink_bucket()
1664 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1669 if (r->res_master_nodeid != our_nodeid) { in shrink_bucket()
1670 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1672 r->res_master_nodeid, r->res_dir_nodeid, in shrink_bucket()
1677 if (r->res_dir_nodeid == our_nodeid) { in shrink_bucket()
1679 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1681 r->res_dir_nodeid, r->res_master_nodeid, in shrink_bucket()
1686 if (!time_after_eq(jiffies, r->res_toss_time + in shrink_bucket()
1688 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1690 r->res_toss_time, jiffies, name); in shrink_bucket()
1694 if (!kref_put(&r->res_ref, kill_rsb)) { in shrink_bucket()
1695 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1700 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in shrink_bucket()
1702 spin_unlock(&ls->ls_rsbtbl[b].lock); in shrink_bucket()
1712 for (i = 0; i < ls->ls_rsbtbl_size; i++) { in dlm_scan_rsbs()
1724 int b, len = r->res_ls->ls_lvblen; in set_lvb_lock()
1728 b=-1 do nothing */ in set_lvb_lock()
1730 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; in set_lvb_lock()
1733 if (!lkb->lkb_lvbptr) in set_lvb_lock()
1736 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock()
1739 if (!r->res_lvbptr) in set_lvb_lock()
1742 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len); in set_lvb_lock()
1743 lkb->lkb_lvbseq = r->res_lvbseq; in set_lvb_lock()
1746 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { in set_lvb_lock()
1751 if (!lkb->lkb_lvbptr) in set_lvb_lock()
1754 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock()
1757 if (!r->res_lvbptr) in set_lvb_lock()
1758 r->res_lvbptr = dlm_allocate_lvb(r->res_ls); in set_lvb_lock()
1760 if (!r->res_lvbptr) in set_lvb_lock()
1763 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len); in set_lvb_lock()
1764 r->res_lvbseq++; in set_lvb_lock()
1765 lkb->lkb_lvbseq = r->res_lvbseq; in set_lvb_lock()
1770 set_bit(DLM_SBF_VALNOTVALID_BIT, &lkb->lkb_sbflags); in set_lvb_lock()
1775 if (lkb->lkb_grmode < DLM_LOCK_PW) in set_lvb_unlock()
1778 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { in set_lvb_unlock()
1783 if (!lkb->lkb_lvbptr) in set_lvb_unlock()
1786 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_unlock()
1789 if (!r->res_lvbptr) in set_lvb_unlock()
1790 r->res_lvbptr = dlm_allocate_lvb(r->res_ls); in set_lvb_unlock()
1792 if (!r->res_lvbptr) in set_lvb_unlock()
1795 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); in set_lvb_unlock()
1796 r->res_lvbseq++; in set_lvb_unlock()
1807 if (!lkb->lkb_lvbptr) in set_lvb_lock_pc()
1810 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock_pc()
1813 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; in set_lvb_lock_pc()
1816 if (len > r->res_ls->ls_lvblen) in set_lvb_lock_pc()
1817 len = r->res_ls->ls_lvblen; in set_lvb_lock_pc()
1818 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); in set_lvb_lock_pc()
1819 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); in set_lvb_lock_pc()
1824 remove_lock -- used for unlock, removes lkb from granted
1825 revert_lock -- used for cancel, moves lkb from convert to granted
1826 grant_lock -- used for request and convert, adds lkb to granted or
1836 lkb->lkb_grmode = DLM_LOCK_IV; in _remove_lock()
1855 -1 removed lock */
1861 lkb->lkb_rqmode = DLM_LOCK_IV; in revert_lock()
1863 switch (lkb->lkb_status) { in revert_lock()
1872 lkb->lkb_grmode = DLM_LOCK_IV; in revert_lock()
1876 rv = -1; in revert_lock()
1879 log_print("invalid status for revert %d", lkb->lkb_status); in revert_lock()
1891 if (lkb->lkb_grmode != lkb->lkb_rqmode) { in _grant_lock()
1892 lkb->lkb_grmode = lkb->lkb_rqmode; in _grant_lock()
1893 if (lkb->lkb_status) in _grant_lock()
1899 lkb->lkb_rqmode = DLM_LOCK_IV; in _grant_lock()
1900 lkb->lkb_highbast = 0; in _grant_lock()
1939 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) { in munge_demoted()
1941 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode); in munge_demoted()
1945 lkb->lkb_grmode = DLM_LOCK_NL; in munge_demoted()
1950 if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) && in munge_altmode()
1951 ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) { in munge_altmode()
1953 lkb->lkb_id, le32_to_cpu(ms->m_type)); in munge_altmode()
1957 if (lkb->lkb_exflags & DLM_LKF_ALTPR) in munge_altmode()
1958 lkb->lkb_rqmode = DLM_LOCK_PR; in munge_altmode()
1959 else if (lkb->lkb_exflags & DLM_LKF_ALTCW) in munge_altmode()
1960 lkb->lkb_rqmode = DLM_LOCK_CW; in munge_altmode()
1962 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags); in munge_altmode()
1969 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb, in first_in_list()
1971 if (lkb->lkb_id == first->lkb_id) in first_in_list()
2002 * Convert Queue: NL->EX (first lock)
2003 * PR->EX (second lock)
2007 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2011 * Originally, this function detected conv-deadlk in a more limited scope:
2012 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2013 * - if lkb1 was the first entry in the queue (not just earlier), and was
2018 * That second condition meant we'd only say there was conv-deadlk if
2029 * be zero, i.e. there will never be conv-deadlk between two locks that are
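The NL->EX / PR->EX scenario described in this comment can be modelled in a few lines of userspace C. The three-mode compatibility rules below follow standard DLM semantics (NL conflicts with nothing, PR is shared, EX conflicts with everything except NL) rather than the file's full matrix:

#include <stdbool.h>
#include <stdio.h>

enum mode { MNL, MPR, MEX };
struct lk { int gr, rq; };              /* granted and requested modes */

static bool compat(int granted, int requested)
{
        if (granted == MNL || requested == MNL)
                return true;            /* NL conflicts with nothing */
        return granted == MPR && requested == MPR; /* PR shared; EX not */
}

int main(void)
{
        struct lk first  = { MNL, MEX };   /* head of the convert queue */
        struct lk second = { MPR, MEX };

        /* first's EX request conflicts with second's granted PR mode... */
        printf("first grantable:  %d\n", compat(second.gr, first.rq));
        /* ...while second, although mode-compatible with first's granted
         * NL, must wait its turn behind first: in-place deadlock. */
        printf("second compat:    %d\n", compat(first.gr, second.rq));
        return 0;
}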
2038 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) { in conversion_deadlock_detect()
2075 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV); in _can_be_granted()
2078 * 6-10: Version 5.4 introduced an option to address the phenomenon of in _can_be_granted()
2081 * 6-11: If the optional EXPEDITE flag is used with the new NL mode in _can_be_granted()
2089 * conversion or used with a non-NL requested mode. We also know an in _can_be_granted()
2092 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can in _can_be_granted()
2096 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE) in _can_be_granted()
2104 if (queue_conflict(&r->res_grantqueue, lkb)) in _can_be_granted()
2108 * 6-3: By default, a conversion request is immediately granted if the in _can_be_granted()
2113 if (queue_conflict(&r->res_convertqueue, lkb)) in _can_be_granted()
2124 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX. in _can_be_granted()
2126 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after in _can_be_granted()
2134 * 6-5: But the default algorithm for deciding whether to grant or in _can_be_granted()
2139 * 6-7: This issue is dealt with by using the optional QUECVT flag with in _can_be_granted()
2156 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT)) in _can_be_granted()
2164 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) { in _can_be_granted()
2165 if (list_empty(&r->res_convertqueue)) in _can_be_granted()
2176 if (lkb->lkb_exflags & DLM_LKF_NOORDER) in _can_be_granted()
2180 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be in _can_be_granted()
2185 if (!now && conv && first_in_list(lkb, &r->res_convertqueue)) in _can_be_granted()
2189 * 6-4: By default, a new request is immediately granted only if all in _can_be_granted()
2192 * - The queue of ungranted conversion requests for the resource is in _can_be_granted()
2194 * - The queue of ungranted new requests for the resource is empty. in _can_be_granted()
2195 * - The mode of the new request is compatible with the most in _can_be_granted()
2199 if (now && !conv && list_empty(&r->res_convertqueue) && in _can_be_granted()
2200 list_empty(&r->res_waitqueue)) in _can_be_granted()
2204 * 6-4: Once a lock request is in the queue of ungranted new requests, in _can_be_granted()
2211 if (!now && !conv && list_empty(&r->res_convertqueue) && in _can_be_granted()
2212 first_in_list(lkb, &r->res_waitqueue)) in _can_be_granted()
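A userspace condensation of the default grant rules quoted above. This sketch assumes the queue-conflict checks earlier in _can_be_granted() have already passed, and it ignores the QUECVT, EXPEDITE, and NOORDER special cases:

#include <stdbool.h>

static bool default_grant(bool now, bool conv,
                          bool convertq_empty, bool waitq_empty,
                          bool first_in_queue)
{
        if (now && conv)                /* 6-3: immediate conversion with */
                return true;            /* no queued conflicts            */
        if (!now && conv)               /* 6-3: queued conversions are    */
                return first_in_queue;  /* granted strictly in order      */
        if (now && !conv)               /* 6-4: new request granted at    */
                return convertq_empty && waitq_empty; /* request time     */
        /* !now && !conv -- 6-4: queued requests wait behind conversions */
        return convertq_empty && first_in_queue;
}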
2222 int8_t alt = 0, rqmode = lkb->lkb_rqmode; in can_be_granted()
2223 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV); in can_be_granted()
2233 * The CONVDEADLK flag is non-standard and tells the dlm to resolve in can_be_granted()
2240 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) { in can_be_granted()
2241 lkb->lkb_grmode = DLM_LOCK_NL; in can_be_granted()
2242 set_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); in can_be_granted()
2244 *err = -EDEADLK; in can_be_granted()
2247 lkb->lkb_id, now); in can_be_granted()
2254 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try in can_be_granted()
2260 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR)) in can_be_granted()
2262 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW)) in can_be_granted()
2266 lkb->lkb_rqmode = alt; in can_be_granted()
2269 set_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); in can_be_granted()
2271 lkb->lkb_rqmode = rqmode; in can_be_granted()
2294 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) { in grant_pending_convert()
2308 lkb->lkb_id, lkb->lkb_nodeid, r->res_name); in grant_pending_convert()
2319 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) { in grant_pending_convert()
2320 if (lkb->lkb_highbast < lkb->lkb_rqmode) { in grant_pending_convert()
2321 queue_bast(r, lkb, lkb->lkb_rqmode); in grant_pending_convert()
2322 lkb->lkb_highbast = lkb->lkb_rqmode; in grant_pending_convert()
2326 lkb->lkb_id, lkb->lkb_nodeid, in grant_pending_convert()
2327 r->res_name); in grant_pending_convert()
2333 hi = max_t(int, lkb->lkb_rqmode, hi); in grant_pending_convert()
2335 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW) in grant_pending_convert()
2354 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) { in grant_pending_wait()
2360 high = max_t(int, lkb->lkb_rqmode, high); in grant_pending_wait()
2361 if (lkb->lkb_rqmode == DLM_LOCK_CW) in grant_pending_wait()
2376 if (gr->lkb_grmode == DLM_LOCK_PR && cw) { in lock_requires_bast()
2377 if (gr->lkb_highbast < DLM_LOCK_EX) in lock_requires_bast()
2382 if (gr->lkb_highbast < high && in lock_requires_bast()
2383 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1]) in lock_requires_bast()
2395 log_print("grant_pending_locks r nodeid %d", r->res_nodeid); in grant_pending_locks()
2412 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { in grant_pending_locks()
2413 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) { in grant_pending_locks()
2415 lkb->lkb_grmode == DLM_LOCK_PR) in grant_pending_locks()
2419 lkb->lkb_highbast = high; in grant_pending_locks()
2426 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) || in modes_require_bast()
2427 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) { in modes_require_bast()
2428 if (gr->lkb_highbast < DLM_LOCK_EX) in modes_require_bast()
2433 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq)) in modes_require_bast()
2447 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { in send_bast_queue()
2448 queue_bast(r, gr, lkb->lkb_rqmode); in send_bast_queue()
2449 gr->lkb_highbast = lkb->lkb_rqmode; in send_bast_queue()
2456 send_bast_queue(r, &r->res_grantqueue, lkb); in send_blocking_asts()
2461 send_bast_queue(r, &r->res_grantqueue, lkb); in send_blocking_asts_all()
2462 send_bast_queue(r, &r->res_convertqueue, lkb); in send_blocking_asts_all()
2465 /* set_master(r, lkb) -- set the master nodeid of a resource
2490 r->res_first_lkid = lkb->lkb_id; in set_master()
2491 lkb->lkb_nodeid = r->res_nodeid; in set_master()
2495 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) { in set_master()
2496 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup); in set_master()
2500 if (r->res_master_nodeid == our_nodeid) { in set_master()
2501 lkb->lkb_nodeid = 0; in set_master()
2505 if (r->res_master_nodeid) { in set_master()
2506 lkb->lkb_nodeid = r->res_master_nodeid; in set_master()
2517 log_debug(r->res_ls, "set_master %x self master %d dir %d %s", in set_master()
2518 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid, in set_master()
2519 r->res_name); in set_master()
2520 r->res_master_nodeid = our_nodeid; in set_master()
2521 r->res_nodeid = 0; in set_master()
2522 lkb->lkb_nodeid = 0; in set_master()
2526 r->res_first_lkid = lkb->lkb_id; in set_master()
2535 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) { in process_lookup_list()
2536 list_del_init(&lkb->lkb_rsb_lookup); in process_lookup_list()
2542 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2548 if (!r->res_first_lkid) in confirm_master()
2553 case -EINPROGRESS: in confirm_master()
2554 r->res_first_lkid = 0; in confirm_master()
2558 case -EAGAIN: in confirm_master()
2559 case -EBADR: in confirm_master()
2560 case -ENOTBLK: in confirm_master()
2565 r->res_first_lkid = 0; in confirm_master()
2567 if (!list_empty(&r->res_lookup)) { in confirm_master()
2568 lkb = list_entry(r->res_lookup.next, struct dlm_lkb, in confirm_master()
2570 list_del_init(&lkb->lkb_rsb_lookup); in confirm_master()
2571 r->res_first_lkid = lkb->lkb_id; in confirm_master()
2577 log_error(r->res_ls, "confirm_master unknown error %d", error); in confirm_master()
2587 int rv = -EINVAL; in set_lock_args()
2624 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr) in set_lock_args()
2627 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid) in set_lock_args()
2634 args->flags = flags; in set_lock_args()
2635 args->astfn = ast; in set_lock_args()
2636 args->astparam = astparam; in set_lock_args()
2637 args->bastfn = bast; in set_lock_args()
2638 args->mode = mode; in set_lock_args()
2639 args->lksb = lksb; in set_lock_args()
2649 return -EINVAL; in set_unlock_args()
2652 return -EINVAL; in set_unlock_args()
2654 args->flags = flags; in set_unlock_args()
2655 args->astparam = astarg; in set_unlock_args()
2662 int rv = -EBUSY; in validate_lock_args()
2664 if (args->flags & DLM_LKF_CONVERT) { in validate_lock_args()
2665 if (lkb->lkb_status != DLM_LKSTS_GRANTED) in validate_lock_args()
2669 if (lkb->lkb_wait_type || atomic_read(&lkb->lkb_wait_count)) in validate_lock_args()
2675 rv = -EINVAL; in validate_lock_args()
2676 if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) in validate_lock_args()
2679 if (args->flags & DLM_LKF_QUECVT && in validate_lock_args()
2680 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) in validate_lock_args()
2684 lkb->lkb_exflags = args->flags; in validate_lock_args()
2686 lkb->lkb_astfn = args->astfn; in validate_lock_args()
2687 lkb->lkb_astparam = args->astparam; in validate_lock_args()
2688 lkb->lkb_bastfn = args->bastfn; in validate_lock_args()
2689 lkb->lkb_rqmode = args->mode; in validate_lock_args()
2690 lkb->lkb_lksb = args->lksb; in validate_lock_args()
2691 lkb->lkb_lvbptr = args->lksb->sb_lvbptr; in validate_lock_args()
2692 lkb->lkb_ownpid = (int) current->pid; in validate_lock_args()
2698 case -EINVAL: in validate_lock_args()
2702 rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, in validate_lock_args()
2703 lkb->lkb_status, lkb->lkb_wait_type); in validate_lock_args()
2707 rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, in validate_lock_args()
2708 lkb->lkb_status, lkb->lkb_wait_type); in validate_lock_args()
2715 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2718 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2724 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in validate_unlock_args()
2725 int rv = -EBUSY; in validate_unlock_args()
2728 if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) && in validate_unlock_args()
2729 (lkb->lkb_wait_type || atomic_read(&lkb->lkb_wait_count))) in validate_unlock_args()
2735 if (!list_empty(&lkb->lkb_rsb_lookup)) { in validate_unlock_args()
2736 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) { in validate_unlock_args()
2737 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id); in validate_unlock_args()
2738 list_del_init(&lkb->lkb_rsb_lookup); in validate_unlock_args()
2739 queue_cast(lkb->lkb_resource, lkb, in validate_unlock_args()
2740 args->flags & DLM_LKF_CANCEL ? in validate_unlock_args()
2741 -DLM_ECANCEL : -DLM_EUNLOCK); in validate_unlock_args()
2744 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */ in validate_unlock_args()
2748 rv = -EINVAL; in validate_unlock_args()
2749 if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2750 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); in validate_unlock_args()
2760 if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2761 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); in validate_unlock_args()
2762 rv = -ENOENT; in validate_unlock_args()
2768 if (args->flags & DLM_LKF_CANCEL) { in validate_unlock_args()
2769 if (lkb->lkb_exflags & DLM_LKF_CANCEL) in validate_unlock_args()
2775 if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2776 set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2777 rv = -EBUSY; in validate_unlock_args()
2782 if (lkb->lkb_status == DLM_LKSTS_GRANTED && in validate_unlock_args()
2783 !lkb->lkb_wait_type) { in validate_unlock_args()
2784 rv = -EBUSY; in validate_unlock_args()
2788 switch (lkb->lkb_wait_type) { in validate_unlock_args()
2791 set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2792 rv = -EBUSY; in validate_unlock_args()
2802 /* do we need to allow a force-unlock if there's a normal unlock in validate_unlock_args()
2804 fail such that we'd want to send a force-unlock to be sure? */ in validate_unlock_args()
2806 if (args->flags & DLM_LKF_FORCEUNLOCK) { in validate_unlock_args()
2807 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK) in validate_unlock_args()
2813 if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2814 set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2815 rv = -EBUSY; in validate_unlock_args()
2819 switch (lkb->lkb_wait_type) { in validate_unlock_args()
2822 set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2823 rv = -EBUSY; in validate_unlock_args()
2833 lkb->lkb_exflags |= args->flags; in validate_unlock_args()
2835 lkb->lkb_astparam = args->astparam; in validate_unlock_args()
2841 case -EINVAL: in validate_unlock_args()
2845 lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, in validate_unlock_args()
2846 args->flags, lkb->lkb_wait_type, in validate_unlock_args()
2847 lkb->lkb_resource->res_name); in validate_unlock_args()
2851 lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, in validate_unlock_args()
2852 args->flags, lkb->lkb_wait_type, in validate_unlock_args()
2853 lkb->lkb_resource->res_name); in validate_unlock_args()
2878 error = -EINPROGRESS; in do_request()
2883 error = -EAGAIN; in do_request()
2884 queue_cast(r, lkb, -EAGAIN); in do_request()
2893 case -EAGAIN: in do_request_effects()
2897 case -EINPROGRESS: in do_request_effects()
2920 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { in do_convert()
2923 queue_cast(r, lkb, -EDEADLK); in do_convert()
2924 error = -EDEADLK; in do_convert()
2929 to NL, and left us on the granted queue. This auto-demotion in do_convert()
2945 error = -EINPROGRESS; in do_convert()
2951 error = -EAGAIN; in do_convert()
2952 queue_cast(r, lkb, -EAGAIN); in do_convert()
2965 case -EAGAIN: in do_convert_effects()
2969 case -EINPROGRESS: in do_convert_effects()
2978 queue_cast(r, lkb, -DLM_EUNLOCK); in do_unlock()
2979 return -DLM_EUNLOCK; in do_unlock()
2988 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2996 queue_cast(r, lkb, -DLM_ECANCEL); in do_cancel()
2997 return -DLM_ECANCEL; in do_cancel()
3123 lkb->lkb_lksb->sb_lkid = lkb->lkb_id; in request_lock()
3138 r = lkb->lkb_resource; in convert_lock()
3160 r = lkb->lkb_resource; in unlock_lock()
3182 r = lkb->lkb_resource; in cancel_lock()
3220 return -EINVAL; in dlm_lock()
3225 error = find_lkb(ls, lksb->sb_lkid, &lkb); in dlm_lock()
3244 if (error == -EINPROGRESS) in dlm_lock()
3251 if (error == -EAGAIN || error == -EDEADLK) in dlm_lock()
3272 return -EINVAL; in dlm_unlock()
3291 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL) in dlm_unlock()
3293 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK))) in dlm_unlock()
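A caller's-eye sketch of the exported API whose error munging is shown above, assuming the include/linux/dlm.h prototypes; the function and identifier names (example, my_ast, my_bast, my_lksb, "my-resource") are hypothetical. Per the lines above, dlm_unlock() reports -DLM_EUNLOCK, -DLM_ECANCEL, and (for CANCEL/FORCEUNLOCK) -EBUSY as 0, with the real outcome delivered through the completion ast:

#include <linux/dlm.h>
#include <linux/string.h>

static struct dlm_lksb my_lksb;

static void my_ast(void *arg)            { /* completion lands here */ }
static void my_bast(void *arg, int mode) { /* blocking notification */ }

static int example(dlm_lockspace_t *ls)
{
        const char *name = "my-resource";
        int error;

        error = dlm_lock(ls, DLM_LOCK_EX, &my_lksb, 0, name, strlen(name),
                         0, my_ast, NULL, my_bast);
        if (error)
                return error;

        /* Later: tear the lock down unconditionally; 0 here only means
         * the request was accepted, not that the unlock completed. */
        return dlm_unlock(ls, my_lksb.sb_lkid, DLM_LKF_FORCEUNLOCK,
                          &my_lksb, NULL);
}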
3337 /* get_buffer gives us a message handle (mh) that we need to in _create_message()
3343 return -ENOBUFS; in _create_message()
3347 ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); in _create_message()
3348 ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); in _create_message()
3349 ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); in _create_message()
3350 ms->m_header.h_length = cpu_to_le16(mb_len); in _create_message()
3351 ms->m_header.h_cmd = DLM_MSG; in _create_message()
3353 ms->m_type = cpu_to_le32(mstype); in _create_message()
3372 mb_len += r->res_length; in create_message()
3379 if (lkb && lkb->lkb_lvbptr && (lkb->lkb_exflags & DLM_LKF_VALBLK)) in create_message()
3380 mb_len += r->res_ls->ls_lvblen; in create_message()
3384 return _create_message(r->res_ls, mb_len, to_nodeid, mstype, in create_message()
3401 ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid); in send_args()
3402 ms->m_pid = cpu_to_le32(lkb->lkb_ownpid); in send_args()
3403 ms->m_lkid = cpu_to_le32(lkb->lkb_id); in send_args()
3404 ms->m_remid = cpu_to_le32(lkb->lkb_remid); in send_args()
3405 ms->m_exflags = cpu_to_le32(lkb->lkb_exflags); in send_args()
3406 ms->m_sbflags = cpu_to_le32(dlm_sbflags_val(lkb)); in send_args()
3407 ms->m_flags = cpu_to_le32(dlm_dflags_val(lkb)); in send_args()
3408 ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); in send_args()
3409 ms->m_status = cpu_to_le32(lkb->lkb_status); in send_args()
3410 ms->m_grmode = cpu_to_le32(lkb->lkb_grmode); in send_args()
3411 ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode); in send_args()
3412 ms->m_hash = cpu_to_le32(r->res_hash); in send_args()
3417 if (lkb->lkb_bastfn) in send_args()
3418 ms->m_asts |= cpu_to_le32(DLM_CB_BAST); in send_args()
3419 if (lkb->lkb_astfn) in send_args()
3420 ms->m_asts |= cpu_to_le32(DLM_CB_CAST); in send_args()
3425 switch (ms->m_type) { in send_args()
3428 memcpy(ms->m_extra, r->res_name, r->res_length); in send_args()
3435 if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK)) in send_args()
3437 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); in send_args()
3448 to_nodeid = r->res_nodeid; in send_common()
3460 error = send_message(mh, ms, r->res_name, r->res_length); in send_common()
3484 r->res_ls->ls_local_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); in send_convert()
3485 r->res_ls->ls_local_ms.m_result = 0; in send_convert()
3486 __receive_convert_reply(r, lkb, &r->res_ls->ls_local_ms, true); in send_convert()
3512 to_nodeid = lkb->lkb_nodeid; in send_grant()
3521 ms->m_result = 0; in send_grant()
3523 error = send_message(mh, ms, r->res_name, r->res_length); in send_grant()
3534 to_nodeid = lkb->lkb_nodeid; in send_bast()
3543 ms->m_bastmode = cpu_to_le32(mode); in send_bast()
3545 error = send_message(mh, ms, r->res_name, r->res_length); in send_bast()
3569 error = send_message(mh, ms, r->res_name, r->res_length); in send_lookup()
3592 memcpy(ms->m_extra, r->res_name, r->res_length); in send_remove()
3593 ms->m_hash = cpu_to_le32(r->res_hash); in send_remove()
3595 error = send_message(mh, ms, r->res_name, r->res_length); in send_remove()
3607 to_nodeid = lkb->lkb_nodeid; in send_common_reply()
3615 ms->m_result = cpu_to_le32(to_dlm_errno(rv)); in send_common_reply()
3617 error = send_message(mh, ms, r->res_name, r->res_length); in send_common_reply()
3646 struct dlm_rsb *r = &ls->ls_local_rsb; in send_lookup_reply()
3649 int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid); in send_lookup_reply()
3656 ms->m_lkid = ms_in->m_lkid; in send_lookup_reply()
3657 ms->m_result = cpu_to_le32(to_dlm_errno(rv)); in send_lookup_reply()
3658 ms->m_nodeid = cpu_to_le32(ret_nodeid); in send_lookup_reply()
3660 error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in)); in send_lookup_reply()
3671 lkb->lkb_exflags = le32_to_cpu(ms->m_exflags); in receive_flags()
3672 dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); in receive_flags()
3673 dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); in receive_flags()
3683 dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); in receive_flags_reply()
3684 dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); in receive_flags_reply()
3689 return (le16_to_cpu(ms->m_header.h_length) - in receive_extralen()
3698 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_lvb()
3699 if (!lkb->lkb_lvbptr) in receive_lvb()
3700 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_lvb()
3701 if (!lkb->lkb_lvbptr) in receive_lvb()
3702 return -ENOMEM; in receive_lvb()
3704 if (len > ls->ls_lvblen) in receive_lvb()
3705 len = ls->ls_lvblen; in receive_lvb()
3706 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); in receive_lvb()
3724 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request_args()
3725 lkb->lkb_ownpid = le32_to_cpu(ms->m_pid); in receive_request_args()
3726 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in receive_request_args()
3727 lkb->lkb_grmode = DLM_LOCK_IV; in receive_request_args()
3728 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); in receive_request_args()
3730 lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL; in receive_request_args()
3731 lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL; in receive_request_args()
3733 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_request_args()
3735 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_request_args()
3736 if (!lkb->lkb_lvbptr) in receive_request_args()
3737 return -ENOMEM; in receive_request_args()
3746 if (lkb->lkb_status != DLM_LKSTS_GRANTED) in receive_convert_args()
3747 return -EBUSY; in receive_convert_args()
3750 return -ENOMEM; in receive_convert_args()
3752 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); in receive_convert_args()
3753 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); in receive_convert_args()
3762 return -ENOMEM; in receive_unlock_args()
3766 /* We fill in the local-lkb fields with the info that send_xxxx_reply()
3771 struct dlm_lkb *lkb = &ls->ls_local_lkb; in setup_local_lkb()
3772 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in setup_local_lkb()
3773 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in setup_local_lkb()
3781 int from = le32_to_cpu(ms->m_header.h_nodeid); in validate_message()
3785 if (ms->m_flags & cpu_to_le32(BIT(DLM_DFL_USER_BIT)) && in validate_message()
3786 !test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { in validate_message()
3787 log_error(lkb->lkb_resource->res_ls, in validate_message()
3789 error = -EINVAL; in validate_message()
3793 switch (ms->m_type) { in validate_message()
3797 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from) in validate_message()
3798 error = -EINVAL; in validate_message()
3806 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from) in validate_message()
3807 error = -EINVAL; in validate_message()
3812 error = -EINVAL; in validate_message()
3813 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from) in validate_message()
3814 error = -EINVAL; in validate_message()
3818 error = -EINVAL; in validate_message()
3823 log_error(lkb->lkb_resource->res_ls, in validate_message()
3825 le32_to_cpu(ms->m_type), from, lkb->lkb_id, in validate_message()
3826 lkb->lkb_remid, dlm_iflags_val(lkb), in validate_message()
3827 lkb->lkb_nodeid); in validate_message()
3838 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request()
3845 set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in receive_request()
3853 for this rsb or not, so if the master sends us a request, we should in receive_request()
3856 node sends us a request for the rsb. */ in receive_request()
3860 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid, in receive_request()
3869 if (r->res_master_nodeid != dlm_our_nodeid()) { in receive_request()
3887 if (error == -EINPROGRESS) in receive_request()
3897 ENOTBLK request failures when the lookup reply designating us in receive_request()
3900 if (error != -ENOTBLK) { in receive_request()
3902 le32_to_cpu(ms->m_lkid), from_nodeid, error); in receive_request()
3906 send_request_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_request()
3916 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_convert()
3920 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { in receive_convert()
3922 "remote %d %x", lkb->lkb_id, lkb->lkb_remid, in receive_convert()
3923 (unsigned long long)lkb->lkb_recover_seq, in receive_convert()
3924 le32_to_cpu(ms->m_header.h_nodeid), in receive_convert()
3925 le32_to_cpu(ms->m_lkid)); in receive_convert()
3926 error = -ENOENT; in receive_convert()
3931 r = lkb->lkb_resource; in receive_convert()
3962 send_convert_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_convert()
3972 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_unlock()
3976 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { in receive_unlock()
3978 lkb->lkb_id, lkb->lkb_remid, in receive_unlock()
3979 le32_to_cpu(ms->m_header.h_nodeid), in receive_unlock()
3980 le32_to_cpu(ms->m_lkid)); in receive_unlock()
3981 error = -ENOENT; in receive_unlock()
3986 r = lkb->lkb_resource; in receive_unlock()
4014 send_unlock_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_unlock()
4024 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_cancel()
4030 r = lkb->lkb_resource; in receive_cancel()
4050 send_cancel_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_cancel()
4060 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_grant()
4064 r = lkb->lkb_resource; in receive_grant()
4091 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_bast()
4095 r = lkb->lkb_resource; in receive_bast()
4104 queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode)); in receive_bast()
4105 lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode); in receive_bast()
4117 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_lookup()
4122 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0, in receive_lookup()
4140 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_remove()
4150 dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash)); in receive_remove()
4167 memcpy(name, ms->m_extra, len); in receive_remove()
4170 b = hash & (ls->ls_rsbtbl_size - 1); in receive_remove()
4172 spin_lock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4174 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); in receive_remove()
4177 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); in receive_remove()
4182 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4185 if (r->res_master_nodeid != from_nodeid) { in receive_remove()
4188 from_nodeid, r->res_master_nodeid); in receive_remove()
4190 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4195 from_nodeid, r->res_master_nodeid, r->res_first_lkid, in receive_remove()
4197 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4201 if (r->res_master_nodeid != from_nodeid) { in receive_remove()
4203 from_nodeid, r->res_master_nodeid); in receive_remove()
4205 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4209 if (kref_put(&r->res_ref, kill_rsb)) { in receive_remove()
4210 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); in receive_remove()
4211 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4217 spin_unlock(&ls->ls_rsbtbl[b].lock); in receive_remove()
4223 do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid)); in receive_purge()
4232 int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request_reply()
4234 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_request_reply()
4238 r = lkb->lkb_resource; in receive_request_reply()
4246 mstype = lkb->lkb_wait_type; in receive_request_reply()
4250 lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid), in receive_request_reply()
4251 from_dlm_errno(le32_to_cpu(ms->m_result))); in receive_request_reply()
4259 r->res_master_nodeid = from_nodeid; in receive_request_reply()
4260 r->res_nodeid = from_nodeid; in receive_request_reply()
4261 lkb->lkb_nodeid = from_nodeid; in receive_request_reply()
4265 result = from_dlm_errno(le32_to_cpu(ms->m_result)); in receive_request_reply()
4268 case -EAGAIN: in receive_request_reply()
4270 queue_cast(r, lkb, -EAGAIN); in receive_request_reply()
4271 confirm_master(r, -EAGAIN); in receive_request_reply()
4275 case -EINPROGRESS: in receive_request_reply()
4279 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in receive_request_reply()
4291 case -EBADR: in receive_request_reply()
4292 case -ENOTBLK: in receive_request_reply()
4295 "master %d dir %d first %x %s", lkb->lkb_id, in receive_request_reply()
4296 from_nodeid, result, r->res_master_nodeid, in receive_request_reply()
4297 r->res_dir_nodeid, r->res_first_lkid, r->res_name); in receive_request_reply()
4299 if (r->res_dir_nodeid != dlm_our_nodeid() && in receive_request_reply()
4300 r->res_master_nodeid != dlm_our_nodeid()) { in receive_request_reply()
4301 /* cause _request_lock->set_master->send_lookup */ in receive_request_reply()
4302 r->res_master_nodeid = 0; in receive_request_reply()
4303 r->res_nodeid = -1; in receive_request_reply()
4304 lkb->lkb_nodeid = -1; in receive_request_reply()
4315 if (r->res_master_nodeid == dlm_our_nodeid()) in receive_request_reply()
4322 lkb->lkb_id, result); in receive_request_reply()
4325 if ((result == 0 || result == -EINPROGRESS) && in receive_request_reply()
4326 test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { in receive_request_reply()
4328 lkb->lkb_id, result); in receive_request_reply()
4329 clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in receive_request_reply()
4331 } else if ((result == -EINPROGRESS) && in receive_request_reply()
4333 &lkb->lkb_iflags)) { in receive_request_reply()
4334 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id); in receive_request_reply()
4335 clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in receive_request_reply()
4338 clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in receive_request_reply()
4339 clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in receive_request_reply()
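/*
 * The reply results handled above, in one view (the granted/0 case and
 * some wait-type details are elided in this listing):
 *
 *   -EAGAIN            not grantable under NOQUEUE: cast -EAGAIN back
 *                      to the caller, confirm_master(r, -EAGAIN)
 *   -EINPROGRESS, 0    queued or granted on the remote master: save its
 *                      lkid from m_lkid in lkb_remid
 *   -EBADR, -ENOTBLK   the presumed master disowns the rsb: unless we
 *                      have meanwhile become dir node or master, zero
 *                      res_master_nodeid and set res_nodeid/lkb_nodeid
 *                      to -1 so the request repeats via a new lookup
 *
 * Overlapped unlock/cancel requests that arrived while this request was
 * in flight are resolved last (the OVERLAP_* bit tests above).
 */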
4352 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in __receive_convert_reply()
4353 case -EAGAIN: in __receive_convert_reply()
4355 queue_cast(r, lkb, -EAGAIN); in __receive_convert_reply()
4358 case -EDEADLK: in __receive_convert_reply()
4361 queue_cast(r, lkb, -EDEADLK); in __receive_convert_reply()
4364 case -EINPROGRESS: in __receive_convert_reply()
4383 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d", in __receive_convert_reply()
4384 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), in __receive_convert_reply()
4385 le32_to_cpu(ms->m_lkid), in __receive_convert_reply()
4386 from_dlm_errno(le32_to_cpu(ms->m_result))); in __receive_convert_reply()
4395 struct dlm_rsb *r = lkb->lkb_resource; in _receive_convert_reply()
4422 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_convert_reply()
4434 struct dlm_rsb *r = lkb->lkb_resource; in _receive_unlock_reply()
4451 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in _receive_unlock_reply()
4452 case -DLM_EUNLOCK: in _receive_unlock_reply()
4455 queue_cast(r, lkb, -DLM_EUNLOCK); in _receive_unlock_reply()
4457 case -ENOENT: in _receive_unlock_reply()
4460 log_error(r->res_ls, "receive_unlock_reply %x error %d", in _receive_unlock_reply()
4461 lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_unlock_reply()
4474 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_unlock_reply()
4486 struct dlm_rsb *r = lkb->lkb_resource; in _receive_cancel_reply()
4503 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in _receive_cancel_reply()
4504 case -DLM_ECANCEL: in _receive_cancel_reply()
4507 queue_cast(r, lkb, -DLM_ECANCEL); in _receive_cancel_reply()
4512 log_error(r->res_ls, "receive_cancel_reply %x error %d", in _receive_cancel_reply()
4513 lkb->lkb_id, in _receive_cancel_reply()
4514 from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_cancel_reply()
4527 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_cancel_reply()
4544 error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb); in receive_lookup_reply()
4547 le32_to_cpu(ms->m_lkid)); in receive_lookup_reply()
4551 /* ms->m_result is the value returned by dlm_master_lookup on the dir node. in receive_lookup_reply()
4552 FIXME: will a non-zero error ever be returned? */ in receive_lookup_reply()

4554 r = lkb->lkb_resource; in receive_lookup_reply()
4562 ret_nodeid = le32_to_cpu(ms->m_nodeid); in receive_lookup_reply()
4570 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) { in receive_lookup_reply()
4574 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), in receive_lookup_reply()
4575 ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid, in receive_lookup_reply()
4576 dlm_our_nodeid(), r->res_first_lkid, r->res_name); in receive_lookup_reply()
4580 r->res_master_nodeid = ret_nodeid; in receive_lookup_reply()
4581 r->res_nodeid = 0; in receive_lookup_reply()
4583 r->res_first_lkid = 0; in receive_lookup_reply()
4584 } else if (ret_nodeid == -1) { in receive_lookup_reply()
4587 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid)); in receive_lookup_reply()
4588 r->res_master_nodeid = 0; in receive_lookup_reply()
4589 r->res_nodeid = -1; in receive_lookup_reply()
4590 lkb->lkb_nodeid = -1; in receive_lookup_reply()
4593 r->res_master_nodeid = ret_nodeid; in receive_lookup_reply()
4594 r->res_nodeid = ret_nodeid; in receive_lookup_reply()
4599 lkb->lkb_id, dlm_iflags_val(lkb)); in receive_lookup_reply()
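/*
 * The three ret_nodeid outcomes above, side by side:
 *
 *   ret_nodeid == dlm_our_nodeid()   we are master: res_master_nodeid
 *                                    = us, res_nodeid = 0
 *   ret_nodeid == -1                 the replying node denies being the
 *                                    dir node: clear res_master_nodeid
 *                                    and set res_nodeid/lkb_nodeid to -1
 *                                    so a later retry redoes the lookup
 *   any other node                   remote master: res_master_nodeid =
 *                                    res_nodeid = ret_nodeid
 *
 * A reply that contradicts an already-set res_master_nodeid is logged
 * (the "from %d ret %d" error above) and the reply is then applied
 * anyway.
 */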
4621 if (WARN_ON_ONCE(!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid)))) { in _receive_message()
4622 log_limit(ls, "receive %d from non-member %d %x %x %d", in _receive_message()
4623 le32_to_cpu(ms->m_type), in _receive_message()
4624 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4625 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), in _receive_message()
4626 from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_message()
4630 switch (ms->m_type) { in _receive_message()
4705 le32_to_cpu(ms->m_type)); in _receive_message()
4719 if (error == -ENOENT && noent) { in _receive_message()
4721 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), in _receive_message()
4722 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4723 le32_to_cpu(ms->m_lkid), saved_seq); in _receive_message()
4724 } else if (error == -ENOENT) { in _receive_message()
4726 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), in _receive_message()
4727 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4728 le32_to_cpu(ms->m_lkid), saved_seq); in _receive_message()
4730 if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT)) in _receive_message()
4731 dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash)); in _receive_message()
4734 if (error == -EINVAL) { in _receive_message()
4737 le32_to_cpu(ms->m_type), in _receive_message()
4738 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4739 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), in _receive_message()
4757 other nodes may still be sending us messages from the in dlm_receive_message()
4759 if (WARN_ON_ONCE(!ls->ls_generation)) { in dlm_receive_message()
4761 le32_to_cpu(ms->m_type), nodeid); in dlm_receive_message()
4788 const struct dlm_header *hd = &p->header; in dlm_receive_buffer()
4792 switch (hd->h_cmd) { in dlm_receive_buffer()
4794 type = le32_to_cpu(p->message.m_type); in dlm_receive_buffer()
4797 type = le32_to_cpu(p->rcom.rc_type); in dlm_receive_buffer()
4800 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); in dlm_receive_buffer()
4804 if (le32_to_cpu(hd->h_nodeid) != nodeid) { in dlm_receive_buffer()
4806 le32_to_cpu(hd->h_nodeid), nodeid, in dlm_receive_buffer()
4807 le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
4811 ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
4816 le32_to_cpu(hd->u.h_lockspace), nodeid, in dlm_receive_buffer()
4817 hd->h_cmd, type); in dlm_receive_buffer()
4820 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) in dlm_receive_buffer()
4821 dlm_send_ls_not_ready(nodeid, &p->rcom); in dlm_receive_buffer()
4828 down_read(&ls->ls_recv_active); in dlm_receive_buffer()
4829 if (hd->h_cmd == DLM_MSG) in dlm_receive_buffer()
4830 dlm_receive_message(ls, &p->message, nodeid); in dlm_receive_buffer()
4831 else if (hd->h_cmd == DLM_RCOM) in dlm_receive_buffer()
4832 dlm_receive_rcom(ls, &p->rcom, nodeid); in dlm_receive_buffer()
4835 hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
4836 up_read(&ls->ls_recv_active); in dlm_receive_buffer()
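/*
 * Admission checks above, in order: h_cmd must be DLM_MSG or DLM_RCOM;
 * the nodeid inside the header must match the nodeid the connection
 * layer reports; and the global lockspace id must resolve to a local
 * lockspace. A miss on the last check still answers a DLM_RCOM_STATUS
 * probe with dlm_send_ls_not_ready(), so a joining node can tell "not
 * ready yet" from a lost packet. ls_recv_active is taken for read
 * around delivery; the recovery path takes the write side to drain all
 * receivers first.
 */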
4847 ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); in recover_convert_waiter()
4848 ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS)); in recover_convert_waiter()
4849 ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in recover_convert_waiter()
4853 lkb->lkb_grmode = DLM_LOCK_IV; in recover_convert_waiter()
4854 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT); in recover_convert_waiter()
4857 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { in recover_convert_waiter()
4858 set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in recover_convert_waiter()
4861 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down in recover_convert_waiter()
4874 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid)) in waiter_needs_recovery()
4882 dead node. Requests and up-conversions we flag to be resent after
4883 recovery. Down-conversions can just be completed with a fake reply like
4897 mutex_lock(&ls->ls_waiters_mutex); in dlm_recover_waiters_pre()
4899 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { in dlm_recover_waiters_pre()
4901 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource); in dlm_recover_waiters_pre()
4906 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) { in dlm_recover_waiters_pre()
4909 lkb->lkb_id, in dlm_recover_waiters_pre()
4910 lkb->lkb_remid, in dlm_recover_waiters_pre()
4911 lkb->lkb_wait_type, in dlm_recover_waiters_pre()
4912 lkb->lkb_resource->res_nodeid, in dlm_recover_waiters_pre()
4913 lkb->lkb_nodeid, in dlm_recover_waiters_pre()
4914 lkb->lkb_wait_nodeid, in dlm_recover_waiters_pre()
4921 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) { in dlm_recover_waiters_pre()
4922 set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in dlm_recover_waiters_pre()
4929 wait_type = lkb->lkb_wait_type; in dlm_recover_waiters_pre()
4930 local_unlock_result = -DLM_EUNLOCK; in dlm_recover_waiters_pre()
4931 local_cancel_result = -DLM_ECANCEL; in dlm_recover_waiters_pre()
4941 if (lkb->lkb_grmode == DLM_LOCK_IV) in dlm_recover_waiters_pre()
4946 if (lkb->lkb_grmode == DLM_LOCK_IV) in dlm_recover_waiters_pre()
4947 local_unlock_result = -ENOENT; in dlm_recover_waiters_pre()
4951 lkb->lkb_id, dlm_iflags_val(lkb), wait_type, in dlm_recover_waiters_pre()
4958 set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in dlm_recover_waiters_pre()
4968 ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY); in dlm_recover_waiters_pre()
4969 ms_local->m_result = cpu_to_le32(to_dlm_errno(local_unlock_result)); in dlm_recover_waiters_pre()
4970 ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in dlm_recover_waiters_pre()
4978 ms_local->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY); in dlm_recover_waiters_pre()
4979 ms_local->m_result = cpu_to_le32(to_dlm_errno(local_cancel_result)); in dlm_recover_waiters_pre()
4980 ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in dlm_recover_waiters_pre()
4987 lkb->lkb_wait_type, wait_type); in dlm_recover_waiters_pre()
4991 mutex_unlock(&ls->ls_waiters_mutex); in dlm_recover_waiters_pre()
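/*
 * The fake replies above reduce to one pattern: fill in only the three
 * fields the reply path reads, then hand the stack message to the
 * normal handler. A sketch for the unlock case; the function name is
 * hypothetical, the hold_lkb/dlm_put_lkb pair wrapped around the real
 * call sites is elided, and the final bool argument is assumed to mark
 * a locally built message:
 */
static void fake_unlock_reply_sketch(struct dlm_lkb *lkb,
				     struct dlm_message *ms_local)
{
	memset(ms_local, 0, sizeof(struct dlm_message));
	ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
	ms_local->m_result = cpu_to_le32(to_dlm_errno(-DLM_EUNLOCK));
	ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
	_receive_unlock_reply(lkb, ms_local, true);
}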
4999 mutex_lock(&ls->ls_waiters_mutex); in find_resend_waiter()
5000 list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { in find_resend_waiter()
5001 if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { in find_resend_waiter()
5007 mutex_unlock(&ls->ls_waiters_mutex); in find_resend_waiter()
5013 master or dir-node for r. Processing the lkb may result in it being placed
5032 int error = 0, mstype, err, oc, ou; in dlm_recover_waiters_post() local
5037 error = -EINTR; in dlm_recover_waiters_post()
5045 r = lkb->lkb_resource; in dlm_recover_waiters_post()
5049 mstype = lkb->lkb_wait_type; in dlm_recover_waiters_post()
5050 oc = test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, in dlm_recover_waiters_post()
5051 &lkb->lkb_iflags); in dlm_recover_waiters_post()
5053 &lkb->lkb_iflags); in dlm_recover_waiters_post()
5058 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype, in dlm_recover_waiters_post()
5059 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, in dlm_recover_waiters_post()
5060 dlm_dir_nodeid(r), oc, ou); in dlm_recover_waiters_post()
5066 clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in dlm_recover_waiters_post()
5067 lkb->lkb_wait_type = 0; in dlm_recover_waiters_post()
5071 while (!atomic_dec_and_test(&lkb->lkb_wait_count)) in dlm_recover_waiters_post()
5074 mutex_lock(&ls->ls_waiters_mutex); in dlm_recover_waiters_post()
5075 list_del_init(&lkb->lkb_wait_reply); in dlm_recover_waiters_post()
5076 mutex_unlock(&ls->ls_waiters_mutex); in dlm_recover_waiters_post()
5078 if (oc || ou) { in dlm_recover_waiters_post()
5083 queue_cast(r, lkb, ou ? -DLM_EUNLOCK : in dlm_recover_waiters_post()
5084 -DLM_ECANCEL); in dlm_recover_waiters_post()
5088 if (oc) { in dlm_recover_waiters_post()
5089 queue_cast(r, lkb, -DLM_ECANCEL); in dlm_recover_waiters_post()
5091 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK; in dlm_recover_waiters_post()
5117 lkb->lkb_id, mstype, r->res_nodeid, in dlm_recover_waiters_post()
5118 dlm_dir_nodeid(r), oc, ou); in dlm_recover_waiters_post()
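/*
 * Decision table for the oc/ou overlap flags above (the resend arm is
 * elided in this listing):
 *
 *   lookup/request + (oc or ou)   don't resend: cast -DLM_EUNLOCK (ou)
 *                                 or -DLM_ECANCEL (oc) and release the
 *                                 lkb
 *   convert + oc                  don't resend: cast -DLM_ECANCEL
 *   convert + ou                  don't resend: complete it as a forced
 *                                 unlock (DLM_LKF_FORCEUNLOCK)
 *   neither flag                  redo the saved operation against the
 *                                 (possibly new) master
 */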
5140 if (lkb->lkb_recover_seq == ls->ls_recover_seq) in purge_mstcpy_list()
5153 struct dlm_ls *ls = r->res_ls; in dlm_purge_mstcpy_locks()
5155 purge_mstcpy_list(ls, r, &r->res_grantqueue); in dlm_purge_mstcpy_locks()
5156 purge_mstcpy_list(ls, r, &r->res_convertqueue); in dlm_purge_mstcpy_locks()
5157 purge_mstcpy_list(ls, r, &r->res_waitqueue); in dlm_purge_mstcpy_locks()
5170 if ((lkb->lkb_nodeid == nodeid_gone) || in purge_dead_list()
5171 dlm_is_removed(ls, lkb->lkb_nodeid)) { in purge_dead_list()
5175 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) && in purge_dead_list()
5176 (lkb->lkb_grmode >= DLM_LOCK_PW)) { in purge_dead_list()
5206 list_for_each_entry(memb, &ls->ls_nodes_gone, list) { in dlm_recover_purge()
5208 nodeid_gone = memb->nodeid; in dlm_recover_purge()
5214 down_write(&ls->ls_root_sem); in dlm_recover_purge()
5215 list_for_each_entry(r, &ls->ls_root_list, res_root_list) { in dlm_recover_purge()
5219 purge_dead_list(ls, r, &r->res_grantqueue, in dlm_recover_purge()
5221 purge_dead_list(ls, r, &r->res_convertqueue, in dlm_recover_purge()
5223 purge_dead_list(ls, r, &r->res_waitqueue, in dlm_recover_purge()
5230 up_write(&ls->ls_root_sem); in dlm_recover_purge()
5242 spin_lock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5243 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { in find_grant_rsb()
5253 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5256 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5268 * Simplest would be to go through each master rsb and check for non-empty
5288 if (bucket == ls->ls_rsbtbl_size - 1) in dlm_recover_grant()
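/*
 * A sketch of the scan that drives find_grant_rsb() above, assuming
 * grant_pending_locks(), confirm_master() and rsb_clear_flag() keep the
 * roles they have elsewhere in this file; the function name is
 * hypothetical and the counters/logging of the real loop are omitted:
 */
static void recover_grant_scan_sketch(struct dlm_ls *ls)
{
	unsigned int count;
	struct dlm_rsb *r;
	int bucket = 0;

	while (1) {
		/* next flagged rsb in this bucket, or NULL when empty */
		r = find_grant_rsb(ls, bucket);
		if (!r) {
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;		/* all buckets done */
			bucket++;
			continue;
		}

		count = 0;
		lock_rsb(r);
		grant_pending_locks(r, &count);	/* re-run grant pass */
		rsb_clear_flag(r, RSB_RECOVER_GRANT);
		confirm_master(r, 0);
		unlock_rsb(r);
		put_rsb(r);
		cond_resched();
	}
}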
5317 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid) in search_remid_list()
5328 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid); in search_remid()
5331 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid); in search_remid()
5334 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid); in search_remid()
5344 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in receive_rcom_lock_args()
5346 lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); in receive_rcom_lock_args()
5347 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); in receive_rcom_lock_args()
5348 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); in receive_rcom_lock_args()
5349 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); in receive_rcom_lock_args()
5350 dlm_set_dflags_val(lkb, le32_to_cpu(rl->rl_flags)); in receive_rcom_lock_args()
5351 set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in receive_rcom_lock_args()
5352 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); in receive_rcom_lock_args()
5353 lkb->lkb_rqmode = rl->rl_rqmode; in receive_rcom_lock_args()
5354 lkb->lkb_grmode = rl->rl_grmode; in receive_rcom_lock_args()
5357 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; in receive_rcom_lock_args()
5358 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; in receive_rcom_lock_args()
5360 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_rcom_lock_args()
5361 int lvblen = le16_to_cpu(rc->rc_header.h_length) - in receive_rcom_lock_args()
5362 sizeof(struct dlm_rcom) - sizeof(struct rcom_lock); in receive_rcom_lock_args()
5363 if (lvblen > ls->ls_lvblen) in receive_rcom_lock_args()
5364 return -EINVAL; in receive_rcom_lock_args()
5365 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_rcom_lock_args()
5366 if (!lkb->lkb_lvbptr) in receive_rcom_lock_args()
5367 return -ENOMEM; in receive_rcom_lock_args()
5368 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); in receive_rcom_lock_args()
5375 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && in receive_rcom_lock_args()
5377 rl->rl_status = DLM_LKSTS_CONVERT; in receive_rcom_lock_args()
5378 lkb->lkb_grmode = DLM_LOCK_IV; in receive_rcom_lock_args()
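/*
 * The rl_wait_type fixup above covers a lock caught mid-conversion when
 * its master died. For PR<->CW (middle_conversion()) neither mode
 * includes the other, so the old master's grant state cannot be
 * inferred; the lkb is therefore requeued as a pending conversion
 * (DLM_LKSTS_CONVERT) with grmode reset to IV, mirroring the
 * RSB_RECOVER_CONVERT handling seen in recover_convert_waiter() above.
 */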
5395 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in dlm_recover_master_copy()
5399 int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); in dlm_recover_master_copy()
5403 *rl_remid = rl->rl_remid; in dlm_recover_master_copy()
5405 if (rl->rl_parent_lkid) { in dlm_recover_master_copy()
5406 error = -EOPNOTSUPP; in dlm_recover_master_copy()
5410 remid = le32_to_cpu(rl->rl_lkid); in dlm_recover_master_copy()
5414 recovery of locks on another node, so one node can send us MSTCPY in dlm_recover_master_copy()
5420 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), in dlm_recover_master_copy()
5430 error = -EBADR; in dlm_recover_master_copy()
5436 error = -EEXIST; in dlm_recover_master_copy()
5451 add_lkb(r, lkb, rl->rl_status); in dlm_recover_master_copy()
5452 ls->ls_recover_locks_in++; in dlm_recover_master_copy()
5454 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) in dlm_recover_master_copy()
5459 saving in its process-copy lkb */ in dlm_recover_master_copy()
5460 *rl_remid = cpu_to_le32(lkb->lkb_id); in dlm_recover_master_copy()
5462 lkb->lkb_recover_seq = ls->ls_recover_seq; in dlm_recover_master_copy()
5468 if (error && error != -EEXIST) in dlm_recover_master_copy()
5479 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in dlm_recover_process_copy()
5485 lkid = le32_to_cpu(rl->rl_lkid); in dlm_recover_process_copy()
5486 remid = le32_to_cpu(rl->rl_remid); in dlm_recover_process_copy()
5487 result = le32_to_cpu(rl->rl_result); in dlm_recover_process_copy()
5492 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5497 r = lkb->lkb_resource; in dlm_recover_process_copy()
5503 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5509 return -EINVAL; in dlm_recover_process_copy()
5513 case -EBADR: in dlm_recover_process_copy()
5519 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5524 case -EEXIST: in dlm_recover_process_copy()
5526 lkb->lkb_remid = remid; in dlm_recover_process_copy()
5530 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5564 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); in dlm_user_request()
5565 if (!ua->lksb.sb_lvbptr) { in dlm_user_request()
5567 error = -ENOMEM; in dlm_user_request()
5571 error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua, in dlm_user_request()
5574 kfree(ua->lksb.sb_lvbptr); in dlm_user_request()
5575 ua->lksb.sb_lvbptr = NULL; in dlm_user_request()
5583 set_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags); in dlm_user_request()
5589 case -EINPROGRESS: in dlm_user_request()
5592 case -EAGAIN: in dlm_user_request()
5599 /* add this new lkb to the per-process list of locks */ in dlm_user_request()
5600 spin_lock(&ua->proc->locks_spin); in dlm_user_request()
5602 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); in dlm_user_request()
5603 spin_unlock(&ua->proc->locks_spin); in dlm_user_request()
5633 ua = lkb->lkb_ua; in dlm_user_convert()
5635 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { in dlm_user_convert()
5636 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); in dlm_user_convert()
5637 if (!ua->lksb.sb_lvbptr) { in dlm_user_convert()
5638 error = -ENOMEM; in dlm_user_convert()
5642 if (lvb_in && ua->lksb.sb_lvbptr) in dlm_user_convert()
5643 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); in dlm_user_convert()
5645 ua->xid = ua_tmp->xid; in dlm_user_convert()
5646 ua->castparam = ua_tmp->castparam; in dlm_user_convert()
5647 ua->castaddr = ua_tmp->castaddr; in dlm_user_convert()
5648 ua->bastparam = ua_tmp->bastparam; in dlm_user_convert()
5649 ua->bastaddr = ua_tmp->bastaddr; in dlm_user_convert()
5650 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_convert()
5652 error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua, in dlm_user_convert()
5659 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK) in dlm_user_convert()
5685 mutex_lock(&ls->ls_orphans_mutex); in dlm_user_adopt_orphan()
5686 list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { in dlm_user_adopt_orphan()
5687 if (iter->lkb_resource->res_length != namelen) in dlm_user_adopt_orphan()
5689 if (memcmp(iter->lkb_resource->res_name, name, namelen)) in dlm_user_adopt_orphan()
5691 if (iter->lkb_grmode != mode) { in dlm_user_adopt_orphan()
5697 list_del_init(&iter->lkb_ownqueue); in dlm_user_adopt_orphan()
5698 clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags); in dlm_user_adopt_orphan()
5699 *lkid = iter->lkb_id; in dlm_user_adopt_orphan()
5702 mutex_unlock(&ls->ls_orphans_mutex); in dlm_user_adopt_orphan()
5705 rv = -EAGAIN; in dlm_user_adopt_orphan()
5710 rv = -ENOENT; in dlm_user_adopt_orphan()
5714 lkb->lkb_exflags = flags; in dlm_user_adopt_orphan()
5715 lkb->lkb_ownpid = (int) current->pid; in dlm_user_adopt_orphan()
5717 ua = lkb->lkb_ua; in dlm_user_adopt_orphan()
5719 ua->proc = ua_tmp->proc; in dlm_user_adopt_orphan()
5720 ua->xid = ua_tmp->xid; in dlm_user_adopt_orphan()
5721 ua->castparam = ua_tmp->castparam; in dlm_user_adopt_orphan()
5722 ua->castaddr = ua_tmp->castaddr; in dlm_user_adopt_orphan()
5723 ua->bastparam = ua_tmp->bastparam; in dlm_user_adopt_orphan()
5724 ua->bastaddr = ua_tmp->bastaddr; in dlm_user_adopt_orphan()
5725 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_adopt_orphan()
5733 spin_lock(&ua->proc->locks_spin); in dlm_user_adopt_orphan()
5734 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); in dlm_user_adopt_orphan()
5735 spin_unlock(&ua->proc->locks_spin); in dlm_user_adopt_orphan()
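/*
 * Adoption key, per the loop above: resource name + length, then
 * granted mode. Orphans matching the name but not the mode are skipped;
 * if only such near-misses exist the call fails with -EAGAIN, and with
 * no name match at all, -ENOENT. An adopted lkb keeps its lock state
 * and just changes owner: off ls_orphans, onto the new process's locks
 * list, with the caller's callback addresses copied in via ua_tmp.
 */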
5757 ua = lkb->lkb_ua; in dlm_user_unlock()
5759 if (lvb_in && ua->lksb.sb_lvbptr) in dlm_user_unlock()
5760 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); in dlm_user_unlock()
5761 if (ua_tmp->castparam) in dlm_user_unlock()
5762 ua->castparam = ua_tmp->castparam; in dlm_user_unlock()
5763 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_unlock()
5771 if (error == -DLM_EUNLOCK) in dlm_user_unlock()
5774 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK)) in dlm_user_unlock()
5779 spin_lock(&ua->proc->locks_spin); in dlm_user_unlock()
5781 if (!list_empty(&lkb->lkb_ownqueue)) in dlm_user_unlock()
5782 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); in dlm_user_unlock()
5783 spin_unlock(&ua->proc->locks_spin); in dlm_user_unlock()
5809 ua = lkb->lkb_ua; in dlm_user_cancel()
5810 if (ua_tmp->castparam) in dlm_user_cancel()
5811 ua->castparam = ua_tmp->castparam; in dlm_user_cancel()
5812 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_cancel()
5820 if (error == -DLM_ECANCEL) in dlm_user_cancel()
5823 if (error == -EBUSY) in dlm_user_cancel()
5850 ua = lkb->lkb_ua; in dlm_user_deadlock()
5858 r = lkb->lkb_resource; in dlm_user_deadlock()
5865 set_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags); in dlm_user_deadlock()
5872 if (error == -DLM_ECANCEL) in dlm_user_deadlock()
5875 if (error == -EBUSY) in dlm_user_deadlock()
5894 mutex_lock(&ls->ls_orphans_mutex); in orphan_proc_lock()
5895 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); in orphan_proc_lock()
5896 mutex_unlock(&ls->ls_orphans_mutex); in orphan_proc_lock()
5898 set_unlock_args(0, lkb->lkb_ua, &args); in orphan_proc_lock()
5901 if (error == -DLM_ECANCEL) in orphan_proc_lock()
5917 lkb->lkb_ua, &args); in unlock_proc_lock()
5920 if (error == -DLM_EUNLOCK) in unlock_proc_lock()
5934 spin_lock(&ls->ls_clear_proc_locks); in del_proc_lock()
5935 if (list_empty(&proc->locks)) in del_proc_lock()
5938 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); in del_proc_lock()
5939 list_del_init(&lkb->lkb_ownqueue); in del_proc_lock()
5941 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) in del_proc_lock()
5942 set_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags); in del_proc_lock()
5944 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in del_proc_lock()
5946 spin_unlock(&ls->ls_clear_proc_locks); in del_proc_lock()
5951 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
5954 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
5955 list, and no more device_writes should add lkb's to proc->locks list; so we
5957 device reads/writes/closes are serialized -- FIXME: we may need to serialize
5970 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) in dlm_clear_proc_locks()
5975 /* this removes the reference for the proc->locks list in dlm_clear_proc_locks()
5982 spin_lock(&ls->ls_clear_proc_locks); in dlm_clear_proc_locks()
5984 /* in-progress unlocks */ in dlm_clear_proc_locks()
5985 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { in dlm_clear_proc_locks()
5986 list_del_init(&lkb->lkb_ownqueue); in dlm_clear_proc_locks()
5987 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in dlm_clear_proc_locks()
5991 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { in dlm_clear_proc_locks()
5993 list_del_init(&lkb->lkb_cb_list); in dlm_clear_proc_locks()
5997 spin_unlock(&ls->ls_clear_proc_locks); in dlm_clear_proc_locks()
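/*
 * Teardown order above: proc->locks drains first (each lkb orphaned if
 * DLM_LKF_PERSISTENT, otherwise unlocked, then marked DEAD with its
 * list reference dropped); then, under ls_clear_proc_locks, the
 * in-progress proc->unlocking entries and the queued proc->asts
 * callbacks are unhooked the same way. The CLOSING flag noted above is
 * what stops new entries racing onto these lists mid-drain.
 */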
6007 spin_lock(&proc->locks_spin); in purge_proc_locks()
6008 if (!list_empty(&proc->locks)) { in purge_proc_locks()
6009 lkb = list_entry(proc->locks.next, struct dlm_lkb, in purge_proc_locks()
6011 list_del_init(&lkb->lkb_ownqueue); in purge_proc_locks()
6013 spin_unlock(&proc->locks_spin); in purge_proc_locks()
6018 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in purge_proc_locks()
6020 dlm_put_lkb(lkb); /* ref from proc->locks list */ in purge_proc_locks()
6023 spin_lock(&proc->locks_spin); in purge_proc_locks()
6024 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { in purge_proc_locks()
6025 list_del_init(&lkb->lkb_ownqueue); in purge_proc_locks()
6026 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in purge_proc_locks()
6029 spin_unlock(&proc->locks_spin); in purge_proc_locks()
6031 spin_lock(&proc->asts_spin); in purge_proc_locks()
6032 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { in purge_proc_locks()
6034 list_del_init(&lkb->lkb_cb_list); in purge_proc_locks()
6037 spin_unlock(&proc->asts_spin); in purge_proc_locks()
6046 mutex_lock(&ls->ls_orphans_mutex); in do_purge()
6047 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) { in do_purge()
6048 if (pid && lkb->lkb_ownpid != pid) in do_purge()
6051 list_del_init(&lkb->lkb_ownqueue); in do_purge()
6054 mutex_unlock(&ls->ls_orphans_mutex); in do_purge()
6067 ms->m_nodeid = cpu_to_le32(nodeid); in send_purge()
6068 ms->m_pid = cpu_to_le32(pid); in send_purge()
6082 if (pid == current->pid) in dlm_user_purge()
6102 return -EOPNOTSUPP; in dlm_debug_add_lkb()
6106 return -ENOMEM; in dlm_debug_add_lkb()
6115 lkb->lkb_nodeid = lkb_nodeid; in dlm_debug_add_lkb()
6116 lkb->lkb_lksb = lksb; in dlm_debug_add_lkb()
6119 lkb->lkb_astparam = (void *)0xDEADBEEF; in dlm_debug_add_lkb()